#!/usr/bin/env python3
"""
Product content understanding and attribute enrichment module (product_enrich).
Provides LLM-based analysis of product anchor text, semantic attributes, tags, etc.,
for in-memory use by the indexer and the API (no longer responsible for CSV reading/writing).
"""
import os
import json
import logging
import re
import time
import hashlib
import uuid
import threading
from dataclasses import dataclass, field
from collections import OrderedDict
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from typing import List, Dict, Tuple, Any, Optional
import redis
import requests
from pathlib import Path

from config.loader import get_app_config
from config.tenant_config_loader import SOURCE_LANG_CODE_MAP
from indexer.product_enrich_prompts import (
    SYSTEM_MESSAGE,
    USER_INSTRUCTION_TEMPLATE,
    LANGUAGE_MARKDOWN_TABLE_HEADERS,
    SHARED_ANALYSIS_INSTRUCTION,
    TAXONOMY_LANGUAGE_MARKDOWN_TABLE_HEADERS,
    TAXONOMY_MARKDOWN_TABLE_HEADERS_EN,
    TAXONOMY_SHARED_ANALYSIS_INSTRUCTION,
)
# Configuration
BATCH_SIZE = 20
# Upper bound on concurrent workers for enrich-content LLM batches
# (thread pool; concurrency applies only to uncached batches)
_APP_CONFIG = get_app_config()
CONTENT_UNDERSTANDING_MAX_WORKERS = int(_APP_CONFIG.product_enrich.max_workers)

# China North 2 (Beijing): https://dashscope.aliyuncs.com/compatible-mode/v1
# Singapore:               https://dashscope-intl.aliyuncs.com/compatible-mode/v1
# US (Virginia):           https://dashscope-us.aliyuncs.com/compatible-mode/v1
API_BASE_URL = "https://dashscope-us.aliyuncs.com/compatible-mode/v1"
MODEL_NAME = "qwen-flash"
API_KEY = os.environ.get("DASHSCOPE_API_KEY")
MAX_RETRIES = 3
RETRY_DELAY = 5  # seconds
REQUEST_TIMEOUT = 180  # seconds
LOGGED_SHARED_CONTEXT_CACHE_SIZE = 256
PROMPT_INPUT_MIN_ZH_CHARS = 20
PROMPT_INPUT_MAX_ZH_CHARS = 100
PROMPT_INPUT_MIN_WORDS = 16
PROMPT_INPUT_MAX_WORDS = 80
# Log paths
OUTPUT_DIR = Path("output_logs")
LOG_DIR = OUTPUT_DIR / "logs"
# Set up standalone logging (does not affect the global indexer.log)
LOG_DIR.mkdir(parents=True, exist_ok=True)
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
log_file = LOG_DIR / f"product_enrich_{timestamp}.log"
verbose_log_file = LOG_DIR / "product_enrich_verbose.log"
_logged_shared_context_keys: "OrderedDict[str, None]" = OrderedDict()
_logged_shared_context_lock = threading.Lock()

_content_understanding_executor: Optional[ThreadPoolExecutor] = None
_content_understanding_executor_lock = threading.Lock()


def _get_content_understanding_executor() -> ThreadPoolExecutor:
    """
    Use a module-level singleton thread pool so that repeated requests within the same
    process do not keep creating new pools and let concurrency run out of control.
    """
    global _content_understanding_executor
    with _content_understanding_executor_lock:
        if _content_understanding_executor is None:
            _content_understanding_executor = ThreadPoolExecutor(
                max_workers=CONTENT_UNDERSTANDING_MAX_WORKERS,
                thread_name_prefix="product-enrich-llm",
            )
        return _content_understanding_executor
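# Sketch of how this pool is consumed further down in analyze_products (concurrent branch):
# futures are created via executor.submit(process_batch, ...) and the pool is never shut
# down by callers, since it is a process-wide singleton.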
# Main logger: execution flow, batch info, etc.
logger = logging.getLogger("product_enrich")
logger.setLevel(logging.INFO)
if not logger.handlers:
    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    file_handler = logging.FileHandler(log_file, encoding="utf-8")
    file_handler.setFormatter(formatter)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    # Do not propagate to the root logger, so nothing leaks into logs/indexer.log or other files
    logger.propagate = False

# Verbose logger: dedicated to recording LLM requests and responses
verbose_logger = logging.getLogger("product_enrich_verbose")
verbose_logger.setLevel(logging.INFO)
if not verbose_logger.handlers:
    verbose_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    verbose_file_handler = logging.FileHandler(verbose_log_file, encoding="utf-8")
    verbose_file_handler.setFormatter(verbose_formatter)
    verbose_logger.addHandler(verbose_file_handler)
    verbose_logger.propagate = False

logger.info("Verbose LLM logs are written to: %s", verbose_log_file)
# Redis cache (for anchors / semantic attributes)
_REDIS_CONFIG = _APP_CONFIG.infrastructure.redis
ANCHOR_CACHE_PREFIX = _REDIS_CONFIG.anchor_cache_prefix
ANCHOR_CACHE_EXPIRE_DAYS = int(_REDIS_CONFIG.anchor_cache_expire_days)

_anchor_redis: Optional[redis.Redis] = None
try:
    _anchor_redis = redis.Redis(
        host=_REDIS_CONFIG.host,
        port=_REDIS_CONFIG.port,
        password=_REDIS_CONFIG.password,
        decode_responses=True,
        socket_timeout=_REDIS_CONFIG.socket_timeout,
        socket_connect_timeout=_REDIS_CONFIG.socket_connect_timeout,
        retry_on_timeout=_REDIS_CONFIG.retry_on_timeout,
        health_check_interval=10,
    )
    _anchor_redis.ping()
    logger.info("Redis cache initialized for product anchors and semantic attributes")
except Exception as e:
    logger.warning(f"Failed to initialize Redis for anchors cache: {e}")
    _anchor_redis = None
_missing_prompt_langs = sorted(set(SOURCE_LANG_CODE_MAP) - set(LANGUAGE_MARKDOWN_TABLE_HEADERS))
if _missing_prompt_langs:
    raise RuntimeError(
        f"Missing product_enrich prompt config for languages: {_missing_prompt_langs}"
    )

# Multi-value field separators: ASCII comma, full-width comma, enumeration comma (、),
# plus the historically agreed ; | / and whitespace
_MULTI_VALUE_FIELD_SPLIT_RE = re.compile(r"[,、,;|/\n\t]+")
_CORE_INDEX_LANGUAGES = ("zh", "en")
_DEFAULT_ENRICHMENT_SCOPES = ("generic", "category_taxonomy")
_DEFAULT_CATEGORY_TAXONOMY_PROFILE = "apparel"
_CONTENT_ANALYSIS_ATTRIBUTE_FIELD_MAP = (
    ("tags", "enriched_tags"),
    ("target_audience", "target_audience"),
    ("usage_scene", "usage_scene"),
    ("season", "season"),
    ("key_attributes", "key_attributes"),
    ("material", "material"),
    ("features", "features"),
)
_CONTENT_ANALYSIS_RESULT_FIELDS = (
    "title",
    "category_path",
    "tags",
    "target_audience",
    "usage_scene",
    "season",
    "key_attributes",
    "material",
    "features",
    "anchor_text",
)
_CONTENT_ANALYSIS_MEANINGFUL_FIELDS = (
    "tags",
    "target_audience",
    "usage_scene",
    "season",
    "key_attributes",
    "material",
    "features",
    "anchor_text",
)
_CONTENT_ANALYSIS_FIELD_ALIASES = {
    "tags": ("tags", "enriched_tags"),
}
_CONTENT_ANALYSIS_QUALITY_FIELDS = ("title", "category_path", "anchor_text")
_APPAREL_TAXONOMY_ATTRIBUTE_FIELD_MAP = (
    ("product_type", "Product Type"),
    ("target_gender", "Target Gender"),
    ("age_group", "Age Group"),
    ("season", "Season"),
    ("fit", "Fit"),
    ("silhouette", "Silhouette"),
    ("neckline", "Neckline"),
    ("sleeve_length_type", "Sleeve Length Type"),
    ("sleeve_style", "Sleeve Style"),
    ("strap_type", "Strap Type"),
    ("rise_waistline", "Rise / Waistline"),
    ("leg_shape", "Leg Shape"),
    ("skirt_shape", "Skirt Shape"),
    ("length_type", "Length Type"),
    ("closure_type", "Closure Type"),
    ("design_details", "Design Details"),
    ("fabric", "Fabric"),
    ("material_composition", "Material Composition"),
    ("fabric_properties", "Fabric Properties"),
    ("clothing_features", "Clothing Features"),
    ("functional_benefits", "Functional Benefits"),
    ("color", "Color"),
    ("color_family", "Color Family"),
    ("print_pattern", "Print / Pattern"),
    ("occasion_end_use", "Occasion / End Use"),
    ("style_aesthetic", "Style Aesthetic"),
)
_APPAREL_TAXONOMY_ANALYSIS_RESULT_FIELDS = tuple(
    field_name for field_name, _ in _APPAREL_TAXONOMY_ATTRIBUTE_FIELD_MAP
)
@dataclass(frozen=True)
class AnalysisSchema:
    name: str
    shared_instruction: str
    markdown_table_headers: Dict[str, List[str]]
    result_fields: Tuple[str, ...]
    meaningful_fields: Tuple[str, ...]
    cache_version: str = "v1"
    field_aliases: Dict[str, Tuple[str, ...]] = field(default_factory=dict)
    fallback_headers: Optional[List[str]] = None
    quality_fields: Tuple[str, ...] = ()

    def get_headers(self, target_lang: str) -> Optional[List[str]]:
        headers = self.markdown_table_headers.get(target_lang)
        if headers:
            return headers
        if self.fallback_headers:
            return self.fallback_headers
        return None


_ANALYSIS_SCHEMAS: Dict[str, AnalysisSchema] = {
    "content": AnalysisSchema(
        name="content",
        shared_instruction=SHARED_ANALYSIS_INSTRUCTION,
        markdown_table_headers=LANGUAGE_MARKDOWN_TABLE_HEADERS,
        result_fields=_CONTENT_ANALYSIS_RESULT_FIELDS,
        meaningful_fields=_CONTENT_ANALYSIS_MEANINGFUL_FIELDS,
        cache_version="v2",
        field_aliases=_CONTENT_ANALYSIS_FIELD_ALIASES,
        quality_fields=_CONTENT_ANALYSIS_QUALITY_FIELDS,
    ),
}
_CATEGORY_TAXONOMY_PROFILE_SCHEMAS: Dict[str, AnalysisSchema] = {
    "apparel": AnalysisSchema(
        name="taxonomy:apparel",
        shared_instruction=TAXONOMY_SHARED_ANALYSIS_INSTRUCTION,
        markdown_table_headers=TAXONOMY_LANGUAGE_MARKDOWN_TABLE_HEADERS,
        result_fields=_APPAREL_TAXONOMY_ANALYSIS_RESULT_FIELDS,
        meaningful_fields=_APPAREL_TAXONOMY_ANALYSIS_RESULT_FIELDS,
        cache_version="v1",
        fallback_headers=TAXONOMY_MARKDOWN_TABLE_HEADERS_EN,
    ),
}
_CATEGORY_TAXONOMY_PROFILE_ATTRIBUTE_FIELD_MAPS: Dict[str, Tuple[Tuple[str, str], ...]] = {
    "apparel": _APPAREL_TAXONOMY_ATTRIBUTE_FIELD_MAP,
}


def _normalize_category_taxonomy_profile(category_taxonomy_profile: Optional[str] = None) -> str:
    profile = str(category_taxonomy_profile or _DEFAULT_CATEGORY_TAXONOMY_PROFILE).strip()
    if profile not in _CATEGORY_TAXONOMY_PROFILE_SCHEMAS:
        raise ValueError(f"Unsupported category_taxonomy_profile: {profile}")
    return profile
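# Illustrative behaviour (hypothetical calls): the default profile is "apparel", and any
# unknown profile raises ValueError.
#   _normalize_category_taxonomy_profile(None)      -> "apparel"
#   _normalize_category_taxonomy_profile("apparel") -> "apparel"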

def _get_analysis_schema(
    analysis_kind: str,
    *,
    category_taxonomy_profile: Optional[str] = None,
) -> AnalysisSchema:
    if analysis_kind == "content":
        return _ANALYSIS_SCHEMAS["content"]
    if analysis_kind == "taxonomy":
        profile = _normalize_category_taxonomy_profile(category_taxonomy_profile)
        return _CATEGORY_TAXONOMY_PROFILE_SCHEMAS[profile]
    raise ValueError(f"Unsupported analysis_kind: {analysis_kind}")


def _get_taxonomy_attribute_field_map(
    category_taxonomy_profile: Optional[str] = None,
) -> Tuple[Tuple[str, str], ...]:
    profile = _normalize_category_taxonomy_profile(category_taxonomy_profile)
    return _CATEGORY_TAXONOMY_PROFILE_ATTRIBUTE_FIELD_MAPS[profile]


def _normalize_enrichment_scopes(
    enrichment_scopes: Optional[List[str]] = None,
) -> Tuple[str, ...]:
    requested = _DEFAULT_ENRICHMENT_SCOPES if not enrichment_scopes else tuple(enrichment_scopes)
    normalized: List[str] = []
    seen = set()
    for enrichment_scope in requested:
        scope = str(enrichment_scope).strip()
        if scope not in {"generic", "category_taxonomy"}:
            raise ValueError(f"Unsupported enrichment_scope: {scope}")
        if scope in seen:
            continue
        seen.add(scope)
        normalized.append(scope)
    return tuple(normalized)
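# Example of scope normalization (hypothetical call): duplicates are dropped, order is
# preserved, and unknown scopes raise ValueError.
#   _normalize_enrichment_scopes(["generic", "generic", "category_taxonomy"])
#   -> ("generic", "category_taxonomy")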

def split_multi_value_field(text: Optional[str]) -> List[str]:
    """Split a multi-valued string from the LLM / business data into a list of phrases (stripped, empties dropped)."""
    if text is None:
        return []
    s = str(text).strip()
    if not s:
        return []
    return [p.strip() for p in _MULTI_VALUE_FIELD_SPLIT_RE.split(s) if p.strip()]
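# Illustrative split (hypothetical input): every separator matched by
# _MULTI_VALUE_FIELD_SPLIT_RE collapses into one split point and empty fragments are dropped.
#   split_multi_value_field("red, blue、green|yellow/")
#   -> ["red", "blue", "green", "yellow"]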

def _append_lang_phrase_map(target: Dict[str, List[str]], lang: str, raw_value: Any) -> None:
    parts = split_multi_value_field(raw_value)
    if not parts:
        return
    existing = target.get(lang) or []
    merged = list(dict.fromkeys([str(x).strip() for x in existing if str(x).strip()] + parts))
    if merged:
        target[lang] = merged
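# Minimal sketch of the merge behaviour (hypothetical data): values are de-duplicated
# while preserving order.
#   m = {"en": ["cotton"]}
#   _append_lang_phrase_map(m, lang="en", raw_value="cotton, breathable")
#   # m == {"en": ["cotton", "breathable"]}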

def _get_or_create_named_value_entry(
    target: List[Dict[str, Any]],
    name: str,
    *,
    default_value: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    for item in target:
        if item.get("name") == name:
            value = item.get("value")
            if isinstance(value, dict):
                return item
            break
    entry = {"name": name, "value": default_value or {}}
    target.append(entry)
    return entry


def _append_named_lang_phrase_map(
    target: List[Dict[str, Any]],
    name: str,
    lang: str,
    raw_value: Any,
) -> None:
    entry = _get_or_create_named_value_entry(target, name=name, default_value={})
    _append_lang_phrase_map(entry["value"], lang=lang, raw_value=raw_value)

def _get_product_id(product: Dict[str, Any]) -> str:
    return str(product.get("id") or product.get("spu_id") or "").strip()


def _get_analysis_field_aliases(field_name: str, schema: AnalysisSchema) -> Tuple[str, ...]:
    return schema.field_aliases.get(field_name, (field_name,))


def _get_analysis_field_value(row: Dict[str, Any], field_name: str, schema: AnalysisSchema) -> Any:
    for alias in _get_analysis_field_aliases(field_name, schema):
        if alias in row:
            return row.get(alias)
    return None


def _has_meaningful_value(value: Any) -> bool:
    if value is None:
        return False
    if isinstance(value, str):
        return bool(value.strip())
    if isinstance(value, dict):
        return any(_has_meaningful_value(v) for v in value.values())
    if isinstance(value, list):
        return any(_has_meaningful_value(v) for v in value)
    return bool(value)

def _make_empty_analysis_result(
    product: Dict[str, Any],
    target_lang: str,
    schema: AnalysisSchema,
    error: Optional[str] = None,
) -> Dict[str, Any]:
    result = {
        "id": _get_product_id(product),
        "lang": target_lang,
        "title_input": str(product.get("title") or "").strip(),
    }
    for field in schema.result_fields:
        result[field] = ""
    if error:
        result["error"] = error
    return result


def _normalize_analysis_result(
    result: Dict[str, Any],
    product: Dict[str, Any],
    target_lang: str,
    schema: AnalysisSchema,
) -> Dict[str, Any]:
    normalized = _make_empty_analysis_result(product, target_lang, schema)
    if not isinstance(result, dict):
        return normalized
    normalized["lang"] = str(result.get("lang") or target_lang).strip() or target_lang
    normalized["title_input"] = str(
        product.get("title") or result.get("title_input") or ""
    ).strip()
    for field in schema.result_fields:
        normalized[field] = str(_get_analysis_field_value(result, field, schema) or "").strip()
    if result.get("error"):
        normalized["error"] = str(result.get("error"))
    return normalized

def _has_meaningful_analysis_content(result: Dict[str, Any], schema: AnalysisSchema) -> bool:
    return any(_has_meaningful_value(result.get(field)) for field in schema.meaningful_fields)


def _append_analysis_attributes(
    target: List[Dict[str, Any]],
    row: Dict[str, Any],
    lang: str,
    schema: AnalysisSchema,
    field_map: Tuple[Tuple[str, str], ...],
) -> None:
    for source_name, output_name in field_map:
        raw = _get_analysis_field_value(row, source_name, schema)
        if not raw:
            continue
        _append_named_lang_phrase_map(
            target,
            name=output_name,
            lang=lang,
            raw_value=raw,
        )


def _apply_index_content_row(result: Dict[str, Any], row: Dict[str, Any], lang: str) -> None:
    if not row or row.get("error"):
        return
    content_schema = _get_analysis_schema("content")
    anchor_text = str(_get_analysis_field_value(row, "anchor_text", content_schema) or "").strip()
    if anchor_text:
        _append_lang_phrase_map(result["qanchors"], lang=lang, raw_value=anchor_text)
    for source_name, output_name in _CONTENT_ANALYSIS_ATTRIBUTE_FIELD_MAP:
        raw = _get_analysis_field_value(row, source_name, content_schema)
        if not raw:
            continue
        _append_named_lang_phrase_map(
            result["enriched_attributes"],
            name=output_name,
            lang=lang,
            raw_value=raw,
        )
        if output_name == "enriched_tags":
            _append_lang_phrase_map(result["enriched_tags"], lang=lang, raw_value=raw)

def _apply_index_taxonomy_row(
    result: Dict[str, Any],
    row: Dict[str, Any],
    lang: str,
    *,
    category_taxonomy_profile: Optional[str] = None,
) -> None:
    if not row or row.get("error"):
        return
    _append_analysis_attributes(
        result["enriched_taxonomy_attributes"],
        row=row,
        lang=lang,
        schema=_get_analysis_schema(
            "taxonomy",
            category_taxonomy_profile=category_taxonomy_profile,
        ),
        field_map=_get_taxonomy_attribute_field_map(category_taxonomy_profile),
    )


def _normalize_index_content_item(item: Dict[str, Any]) -> Dict[str, str]:
    item_id = _get_product_id(item)
    return {
        "id": item_id,
        "title": str(item.get("title") or "").strip(),
        "brief": str(item.get("brief") or "").strip(),
        "description": str(item.get("description") or "").strip(),
        "image_url": str(item.get("image_url") or "").strip(),
    }

def build_index_content_fields(
    items: List[Dict[str, Any]],
    tenant_id: Optional[str] = None,
    enrichment_scopes: Optional[List[str]] = None,
    category_taxonomy_profile: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """
    High-level entry point: generate content-understanding fields aligned with the ES mapping.
    Each input item must contain:
    - `id` or `spu_id`
    - `title`
    - optional `brief` / `description` / `image_url`
    - optional `enrichment_scopes`; by default both `generic` and `category_taxonomy` run
    - optional `category_taxonomy_profile`, defaulting to `apparel`
    Each returned item contains:
    - `id`
    - `qanchors`
    - `enriched_tags`
    - `enriched_attributes`
    - `enriched_taxonomy_attributes`
    - optional `error`
    where:
    - `qanchors.{lang}` is an array of phrases
    - `enriched_tags.{lang}` is an array of tags
    """
    requested_enrichment_scopes = _normalize_enrichment_scopes(enrichment_scopes)
    normalized_taxonomy_profile = _normalize_category_taxonomy_profile(category_taxonomy_profile)
    normalized_items = [_normalize_index_content_item(item) for item in items]
    if not normalized_items:
        return []
    results_by_id: Dict[str, Dict[str, Any]] = {
        item["id"]: {
            "id": item["id"],
            "qanchors": {},
            "enriched_tags": {},
            "enriched_attributes": [],
            "enriched_taxonomy_attributes": [],
        }
        for item in normalized_items
    }
    for lang in _CORE_INDEX_LANGUAGES:
        if "generic" in requested_enrichment_scopes:
            try:
                rows = analyze_products(
                    products=normalized_items,
                    target_lang=lang,
                    batch_size=BATCH_SIZE,
                    tenant_id=tenant_id,
                    analysis_kind="content",
                    category_taxonomy_profile=normalized_taxonomy_profile,
                )
            except Exception as e:
                logger.warning("build_index_content_fields content enrichment failed for lang=%s: %s", lang, e)
                for item in normalized_items:
                    results_by_id[item["id"]].setdefault("error", str(e))
                continue

            for row in rows or []:
                item_id = str(row.get("id") or "").strip()
                if not item_id or item_id not in results_by_id:
                    continue
                if row.get("error"):
                    results_by_id[item_id].setdefault("error", row["error"])
                    continue
                _apply_index_content_row(results_by_id[item_id], row=row, lang=lang)
        if "category_taxonomy" in requested_enrichment_scopes:
            try:
                taxonomy_rows = analyze_products(
                    products=normalized_items,
                    target_lang=lang,
                    batch_size=BATCH_SIZE,
                    tenant_id=tenant_id,
                    analysis_kind="taxonomy",
                    category_taxonomy_profile=normalized_taxonomy_profile,
                )
            except Exception as e:
                logger.warning(
                    "build_index_content_fields taxonomy enrichment failed for lang=%s: %s",
                    lang,
                    e,
                )
                for item in normalized_items:
                    results_by_id[item["id"]].setdefault("error", str(e))
                continue
            for row in taxonomy_rows or []:
                item_id = str(row.get("id") or "").strip()
                if not item_id or item_id not in results_by_id:
                    continue
                if row.get("error"):
                    results_by_id[item_id].setdefault("error", row["error"])
                    continue
                _apply_index_taxonomy_row(
                    results_by_id[item_id],
                    row=row,
                    lang=lang,
                    category_taxonomy_profile=normalized_taxonomy_profile,
                )

    return [results_by_id[item["id"]] for item in normalized_items]
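# Usage sketch (hypothetical item and tenant; actual enrichment values come from the LLM):
#   fields = build_index_content_fields(
#       [{"id": "sku-1", "title": "Linen summer dress"}],
#       tenant_id="demo",
#   )
#   # fields[0] then has the shape:
#   # {"id": "sku-1", "qanchors": {...}, "enriched_tags": {...},
#   #  "enriched_attributes": [...], "enriched_taxonomy_attributes": [...]}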

def _normalize_space(text: str) -> str:
    return re.sub(r"\s+", " ", (text or "").strip())


def _contains_cjk(text: str) -> bool:
    return bool(re.search(r"[\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff]", text or ""))


def _truncate_by_chars(text: str, max_chars: int) -> str:
    return text[:max_chars].strip()


def _truncate_by_words(text: str, max_words: int) -> str:
    words = re.findall(r"\S+", text or "")
    return " ".join(words[:max_words]).strip()


def _detect_prompt_input_lang(text: str) -> str:
    # Simplification: text containing CJK is treated as Chinese-like;
    # everything else is treated as a whitespace-tokenized language.
    return "zh" if _contains_cjk(text) else "en"


def _build_prompt_input_text(product: Dict[str, Any]) -> str:
    """
    Build the product text that is actually fed into the prompt.
    Rules:
    - use title by default
    - if the text is too short, append brief / description in turn
    - if the text is too long, truncate it coarsely according to the language
    """
    fields = [
        _normalize_space(str(product.get("title") or "")),
        _normalize_space(str(product.get("brief") or "")),
        _normalize_space(str(product.get("description") or "")),
    ]
    parts: List[str] = []

    def join_parts() -> str:
        return " | ".join(part for part in parts if part).strip()

    for field in fields:
        if not field:
            continue
        if field not in parts:
            parts.append(field)
        candidate = join_parts()
        if _detect_prompt_input_lang(candidate) == "zh":
            if len(candidate) >= PROMPT_INPUT_MIN_ZH_CHARS:
                return _truncate_by_chars(candidate, PROMPT_INPUT_MAX_ZH_CHARS)
        else:
            if len(re.findall(r"\S+", candidate)) >= PROMPT_INPUT_MIN_WORDS:
                return _truncate_by_words(candidate, PROMPT_INPUT_MAX_WORDS)
    candidate = join_parts()
    if not candidate:
        return ""
    if _detect_prompt_input_lang(candidate) == "zh":
        return _truncate_by_chars(candidate, PROMPT_INPUT_MAX_ZH_CHARS)
    return _truncate_by_words(candidate, PROMPT_INPUT_MAX_WORDS)
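# Sketch of the rule above (hypothetical product): a short English title is padded with
# brief/description until it reaches PROMPT_INPUT_MIN_WORDS, then capped at
# PROMPT_INPUT_MAX_WORDS words; CJK text is handled by character count instead.
#   _build_prompt_input_text({"title": "Slim fit jeans", "brief": "stretch denim", "description": ""})
#   -> "Slim fit jeans | stretch denim"   # still under the caps, so returned as-is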

def _make_analysis_cache_key(
    product: Dict[str, Any],
    target_lang: str,
    analysis_kind: str,
    category_taxonomy_profile: Optional[str] = None,
) -> str:
    """Build the cache key; it is determined only by the analysis kind, the actual prompt input text, and the target language."""
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    prompt_input = _build_prompt_input_text(product)
    h = hashlib.md5(prompt_input.encode("utf-8")).hexdigest()
    prompt_contract = {
        "schema_name": schema.name,
        "cache_version": schema.cache_version,
        "system_message": SYSTEM_MESSAGE,
        "user_instruction_template": USER_INSTRUCTION_TEMPLATE,
        "shared_instruction": schema.shared_instruction,
        "assistant_headers": schema.get_headers(target_lang),
        "result_fields": schema.result_fields,
        "meaningful_fields": schema.meaningful_fields,
        "field_aliases": schema.field_aliases,
    }
    prompt_contract_hash = hashlib.md5(
        json.dumps(prompt_contract, ensure_ascii=False, sort_keys=True).encode("utf-8")
    ).hexdigest()[:12]
    return (
        f"{ANCHOR_CACHE_PREFIX}:{analysis_kind}:{prompt_contract_hash}:"
        f"{target_lang}:{prompt_input[:4]}{h}"
    )
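# Shape of the resulting Redis key (illustrative, with made-up hash values):
#   {ANCHOR_CACHE_PREFIX}:content:9f3c2ab41d02:en:Slim<md5-of-prompt-input>
# Any change to the prompt contract (templates, headers, result fields, cache_version)
# changes the contract hash and therefore invalidates previously cached entries.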

def _make_anchor_cache_key(
    product: Dict[str, Any],
    target_lang: str,
) -> str:
    return _make_analysis_cache_key(product, target_lang, analysis_kind="content")


def _get_cached_analysis_result(
    product: Dict[str, Any],
    target_lang: str,
    analysis_kind: str,
    category_taxonomy_profile: Optional[str] = None,
) -> Optional[Dict[str, Any]]:
    if not _anchor_redis:
        return None
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    try:
        key = _make_analysis_cache_key(
            product,
            target_lang,
            analysis_kind,
            category_taxonomy_profile=category_taxonomy_profile,
        )
        raw = _anchor_redis.get(key)
        if not raw:
            return None
        result = _normalize_analysis_result(
            json.loads(raw),
            product=product,
            target_lang=target_lang,
            schema=schema,
        )
        if not _has_meaningful_analysis_content(result, schema):
            return None
        return result
    except Exception as e:
        logger.warning("Failed to get %s analysis cache: %s", analysis_kind, e)
    return None

def _get_cached_anchor_result(
    product: Dict[str, Any],
    target_lang: str,
) -> Optional[Dict[str, Any]]:
    return _get_cached_analysis_result(product, target_lang, analysis_kind="content")


def _set_cached_analysis_result(
    product: Dict[str, Any],
    target_lang: str,
    result: Dict[str, Any],
    analysis_kind: str,
    category_taxonomy_profile: Optional[str] = None,
) -> None:
    if not _anchor_redis:
        return
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    try:
        normalized = _normalize_analysis_result(
            result,
            product=product,
            target_lang=target_lang,
            schema=schema,
        )
        if not _has_meaningful_analysis_content(normalized, schema):
            return
        key = _make_analysis_cache_key(
            product,
            target_lang,
            analysis_kind,
            category_taxonomy_profile=category_taxonomy_profile,
        )
        ttl = ANCHOR_CACHE_EXPIRE_DAYS * 24 * 3600
        _anchor_redis.setex(key, ttl, json.dumps(normalized, ensure_ascii=False))
    except Exception as e:
        logger.warning("Failed to set %s analysis cache: %s", analysis_kind, e)


def _set_cached_anchor_result(
    product: Dict[str, Any],
    target_lang: str,
    result: Dict[str, Any],
) -> None:
    _set_cached_analysis_result(product, target_lang, result, analysis_kind="content")


def _build_assistant_prefix(headers: List[str]) -> str:
    header_line = "| " + " | ".join(headers) + " |"
    separator_line = "|" + "----|" * len(headers)
    return f"{header_line}\n{separator_line}\n"
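# For hypothetical headers ["No.", "Title", "Tags"] the prefix built above would be:
#   | No. | Title | Tags |
#   |----|----|----|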


def _build_shared_context(products: List[Dict[str, str]], schema: AnalysisSchema) -> str:
    shared_context = schema.shared_instruction
    for idx, product in enumerate(products, 1):
        prompt_input = _build_prompt_input_text(product)
        shared_context += f"{idx}. {prompt_input}\n"
    return shared_context


def _hash_text(text: str) -> str:
    return hashlib.md5((text or "").encode("utf-8")).hexdigest()[:12]


def _mark_shared_context_logged_once(shared_context_key: str) -> bool:
    with _logged_shared_context_lock:
        if shared_context_key in _logged_shared_context_keys:
            _logged_shared_context_keys.move_to_end(shared_context_key)
            return False

        _logged_shared_context_keys[shared_context_key] = None
        if len(_logged_shared_context_keys) > LOGGED_SHARED_CONTEXT_CACHE_SIZE:
            _logged_shared_context_keys.popitem(last=False)
        return True


def reset_logged_shared_context_keys() -> None:
    """Test helper: clear the recorded shared-context prompt keys."""
    with _logged_shared_context_lock:
        _logged_shared_context_keys.clear()


def create_prompt(
    products: List[Dict[str, str]],
    target_lang: str = "zh",
    analysis_kind: str = "content",
    category_taxonomy_profile: Optional[str] = None,
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """Create the shared context, the localized output requirements, and the Partial Mode assistant prefix for the target language."""
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    markdown_table_headers = schema.get_headers(target_lang)
    if not markdown_table_headers:
        logger.warning(
            "Unsupported target_lang for markdown table headers: kind=%s lang=%s",
            analysis_kind,
            target_lang,
        )
        return None, None, None
    shared_context = _build_shared_context(products, schema)
    language_label = SOURCE_LANG_CODE_MAP.get(target_lang, target_lang)
    user_prompt = USER_INSTRUCTION_TEMPLATE.format(language=language_label).strip()
    assistant_prefix = _build_assistant_prefix(markdown_table_headers)
    return shared_context, user_prompt, assistant_prefix


def _merge_partial_response(assistant_prefix: str, generated_content: str) -> str:
    """Stitch the Partial Mode assistant prefix and the generated completion into the full markdown."""
    generated = (generated_content or "").lstrip()
    prefix_lines = [line.strip() for line in assistant_prefix.strip().splitlines()]
    generated_lines = generated.splitlines()
    if generated_lines:
        first_line = generated_lines[0].strip()
        if prefix_lines and first_line == prefix_lines[0]:
            generated_lines = generated_lines[1:]
            if generated_lines and len(prefix_lines) > 1 and generated_lines[0].strip() == prefix_lines[1]:
                generated_lines = generated_lines[1:]
        elif len(prefix_lines) > 1 and first_line == prefix_lines[1]:
            generated_lines = generated_lines[1:]
    suffix = "\n".join(generated_lines).lstrip("\n")
    if suffix:
        return f"{assistant_prefix}{suffix}"
    return assistant_prefix
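# Minimal sketch of the merge (hypothetical model output): duplicated header/separator
# rows from the prefix are dropped before appending the remainder.
#   _merge_partial_response("| A | B |\n|----|----|\n", "| A | B |\n| 1 | x |")
#   -> "| A | B |\n|----|----|\n| 1 | x |"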

def call_llm(
    shared_context: str,
    user_prompt: str,
    assistant_prefix: str,
    target_lang: str = "zh",
    analysis_kind: str = "content",
) -> Tuple[str, str]:
    """Call the LLM API (with retries), using Partial Mode to force the markdown table prefix."""
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    }
    shared_context_key = _hash_text(shared_context)
    localized_tail_key = _hash_text(f"{target_lang}\n{user_prompt}\n{assistant_prefix}")
    combined_user_prompt = f"{shared_context.rstrip()}\n\n{user_prompt.strip()}"
    payload = {
        "model": MODEL_NAME,
        "messages": [
            {
                "role": "system",
                "content": SYSTEM_MESSAGE,
            },
            {
                "role": "user",
                "content": combined_user_prompt,
            },
            {
                "role": "assistant",
                "content": assistant_prefix,
                "partial": True,
            },
        ],
        "temperature": 0.3,
        "top_p": 0.8,
    }
    request_data = {
        "headers": {k: v for k, v in headers.items() if k != "Authorization"},
        "payload": payload,
    }
    if _mark_shared_context_logged_once(shared_context_key):
        logger.info(f"\n{'=' * 80}")
        logger.info(
            "LLM Shared Context [model=%s, kind=%s, shared_key=%s, chars=%s] (logged once per process key)",
            MODEL_NAME,
            analysis_kind,
            shared_context_key,
            len(shared_context),
        )
        logger.info("\nSystem Message:\n%s", SYSTEM_MESSAGE)
        logger.info("\nShared Context:\n%s", shared_context)

    verbose_logger.info(f"\n{'=' * 80}")
    verbose_logger.info(
        "LLM Request [model=%s, kind=%s, lang=%s, shared_key=%s, tail_key=%s]:",
        MODEL_NAME,
        analysis_kind,
        target_lang,
        shared_context_key,
        localized_tail_key,
    )
    verbose_logger.info(json.dumps(request_data, ensure_ascii=False, indent=2))
    verbose_logger.info(f"\nCombined User Prompt:\n{combined_user_prompt}")
    verbose_logger.info(f"\nShared Context:\n{shared_context}")
    verbose_logger.info(f"\nLocalized Requirement:\n{user_prompt}")
    verbose_logger.info(f"\nAssistant Prefix:\n{assistant_prefix}")
    logger.info(
        "\nLLM Request Variant [kind=%s, lang=%s, shared_key=%s, tail_key=%s, prompt_chars=%s, prefix_chars=%s]",
        analysis_kind,
        target_lang,
        shared_context_key,
        localized_tail_key,
        len(user_prompt),
        len(assistant_prefix),
    )
    logger.info("\nLocalized Requirement:\n%s", user_prompt)
    logger.info("\nAssistant Prefix:\n%s", assistant_prefix)
    # Create a requests session with proxies disabled
    session = requests.Session()
    session.trust_env = False  # ignore system proxy settings
    try:
        # Retry loop
        for attempt in range(MAX_RETRIES):
            try:
                response = session.post(
                    f"{API_BASE_URL}/chat/completions",
                    headers=headers,
                    json=payload,
                    timeout=REQUEST_TIMEOUT,
                    proxies={"http": None, "https": None},  # explicitly disable proxies
                )
                response.raise_for_status()
                result = response.json()
                usage = result.get("usage") or {}
                verbose_logger.info(
                    "\nLLM Response [model=%s, kind=%s, lang=%s, shared_key=%s, tail_key=%s]:",
                    MODEL_NAME,
                    analysis_kind,
                    target_lang,
                    shared_context_key,
                    localized_tail_key,
                )
                verbose_logger.info(json.dumps(result, ensure_ascii=False, indent=2))

                generated_content = result["choices"][0]["message"]["content"]
                full_markdown = _merge_partial_response(assistant_prefix, generated_content)

                logger.info(
                    "\nLLM Response Summary [kind=%s, lang=%s, shared_key=%s, tail_key=%s, generated_chars=%s, completion_tokens=%s, prompt_tokens=%s, total_tokens=%s]",
                    analysis_kind,
                    target_lang,
                    shared_context_key,
                    localized_tail_key,
                    len(generated_content or ""),
                    usage.get("completion_tokens"),
                    usage.get("prompt_tokens"),
                    usage.get("total_tokens"),
                )
                logger.info("\nGenerated Content:\n%s", generated_content)
                logger.info("\nMerged Markdown:\n%s", full_markdown)

                verbose_logger.info(f"\nGenerated Content:\n{generated_content}")
                verbose_logger.info(f"\nMerged Markdown:\n{full_markdown}")

                return full_markdown, json.dumps(result, ensure_ascii=False)
            except requests.exceptions.ProxyError as e:
                logger.warning(f"Attempt {attempt + 1}/{MAX_RETRIES}: Proxy error - {str(e)}")
                if attempt < MAX_RETRIES - 1:
                    logger.info(f"Retrying in {RETRY_DELAY} seconds...")
                    time.sleep(RETRY_DELAY)
                else:
                    raise
            except requests.exceptions.RequestException as e:
                logger.warning(f"Attempt {attempt + 1}/{MAX_RETRIES}: Request error - {str(e)}")
                if attempt < MAX_RETRIES - 1:
                    logger.info(f"Retrying in {RETRY_DELAY} seconds...")
                    time.sleep(RETRY_DELAY)
                else:
                    raise
            except Exception as e:
                logger.error(f"Unexpected error on attempt {attempt + 1}/{MAX_RETRIES}: {str(e)}")
                if attempt < MAX_RETRIES - 1:
                    logger.info(f"Retrying in {RETRY_DELAY} seconds...")
                    time.sleep(RETRY_DELAY)
                else:
                    raise
    finally:
        session.close()

def parse_markdown_table(
    markdown_content: str,
    analysis_kind: str = "content",
    category_taxonomy_profile: Optional[str] = None,
) -> List[Dict[str, str]]:
    """Parse the markdown table content."""
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    lines = markdown_content.strip().split("\n")
    data = []
    data_started = False
    for line in lines:
        line = line.strip()
        if not line:
            continue
        # Table rows
        if line.startswith("|"):
            # Separator row (---- or :---: etc.; spaces allowed, e.g. "| ---- | ---- |")
            sep_chars = line.replace("|", "").strip().replace(" ", "")
            if sep_chars and set(sep_chars) <= {"-", ":"}:
                data_started = True
                continue
            # First header row: always skip it, regardless of language
            if not data_started:
                # wait for the next data row
                continue
            # Parse a data row
            parts = [p.strip() for p in line.split("|")]
            if parts and parts[0] == "":
                parts = parts[1:]
            if parts and parts[-1] == "":
                parts = parts[:-1]
            if len(parts) >= 2:
                row = {"seq_no": parts[0]}
                for field_index, field_name in enumerate(schema.result_fields, start=1):
                    row[field_name] = parts[field_index] if len(parts) > field_index else ""
                data.append(row)
    return data
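# Illustrative parse (hypothetical data row, content schema): cells after the leading
# sequence number are mapped positionally onto schema.result_fields, so
#   "| 1 | Red dress | women>dresses | ... |"
# becomes {"seq_no": "1", "title": "Red dress", "category_path": "women>dresses", ...},
# with missing trailing cells filled in as "".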

def _log_parsed_result_quality(
    batch_data: List[Dict[str, str]],
    parsed_results: List[Dict[str, str]],
    target_lang: str,
    batch_num: int,
    analysis_kind: str,
    category_taxonomy_profile: Optional[str] = None,
) -> None:
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    expected = len(batch_data)
    actual = len(parsed_results)
    if actual != expected:
        logger.warning(
            "Parsed row count mismatch for kind=%s batch=%s lang=%s: expected=%s actual=%s",
            analysis_kind,
            batch_num,
            target_lang,
            expected,
            actual,
        )
    if not schema.quality_fields:
        logger.info(
            "Parsed Quality Summary [kind=%s, batch=%s, lang=%s]: rows=%s/%s",
            analysis_kind,
            batch_num,
            target_lang,
            actual,
            expected,
        )
        return

    missing_summary = ", ".join(
        f"missing_{field}="
        f"{sum(1 for item in parsed_results if not str(item.get(field) or '').strip())}"
        for field in schema.quality_fields
    )
    logger.info(
        "Parsed Quality Summary [kind=%s, batch=%s, lang=%s]: rows=%s/%s, %s",
        analysis_kind,
        batch_num,
        target_lang,
        actual,
        expected,
        missing_summary,
    )

def process_batch(
    batch_data: List[Dict[str, str]],
    batch_num: int,
    target_lang: str = "zh",
    analysis_kind: str = "content",
    category_taxonomy_profile: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Process one batch of data."""
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    logger.info(f"\n{'#' * 80}")
    logger.info(
        "Processing Batch %s (%s items, kind=%s)",
        batch_num,
        len(batch_data),
        analysis_kind,
    )
    # Build the prompt
    shared_context, user_prompt, assistant_prefix = create_prompt(
        batch_data,
        target_lang=target_lang,
        analysis_kind=analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    # If prompt creation fails (e.g. an unsupported target_lang), fail the whole batch without calling the LLM
    if shared_context is None or user_prompt is None or assistant_prefix is None:
        logger.error(
            "Failed to create prompt for batch %s, kind=%s, target_lang=%s; "
            "marking entire batch as failed without calling LLM",
            batch_num,
            analysis_kind,
            target_lang,
        )
        return [
            _make_empty_analysis_result(
                item,
                target_lang,
                schema,
                error=f"prompt_creation_failed: unsupported target_lang={target_lang}",
            )
            for item in batch_data
        ]
    # Call the LLM
    try:
        raw_response, full_response_json = call_llm(
            shared_context,
            user_prompt,
            assistant_prefix,
            target_lang=target_lang,
            analysis_kind=analysis_kind,
        )
        # Parse the result
        parsed_results = parse_markdown_table(
            raw_response,
            analysis_kind=analysis_kind,
            category_taxonomy_profile=category_taxonomy_profile,
        )
        _log_parsed_result_quality(
            batch_data,
            parsed_results,
            target_lang,
            batch_num,
            analysis_kind,
            category_taxonomy_profile,
        )
        logger.info(f"\nParsed Results ({len(parsed_results)} items):")
        logger.info(json.dumps(parsed_results, ensure_ascii=False, indent=2))
        # Map back to the original IDs
        results_with_ids = []
        for i, parsed_item in enumerate(parsed_results):
            if i < len(batch_data):
                source_product = batch_data[i]
                result = _normalize_analysis_result(
                    parsed_item,
                    product=source_product,
                    target_lang=target_lang,
                    schema=schema,
                )
                results_with_ids.append(result)
                logger.info(
                    "Mapped: kind=%s seq=%s -> original_id=%s",
                    analysis_kind,
                    parsed_item.get("seq_no"),
                    source_product.get("id"),
                )
        # Save the batch JSON log to a standalone file
        batch_log = {
            "batch_num": batch_num,
            "analysis_kind": analysis_kind,
            "timestamp": datetime.now().isoformat(),
            "input_products": batch_data,
            "raw_response": raw_response,
            "full_response_json": full_response_json,
            "parsed_results": parsed_results,
            "final_results": results_with_ids,
        }
        # When writing batch JSON logs concurrently, keep the file name unique to avoid overwrites
        batch_call_id = uuid.uuid4().hex[:12]
        batch_log_file = (
            LOG_DIR
            / f"batch_{analysis_kind}_{batch_num:04d}_{timestamp}_{batch_call_id}.json"
        )
        with open(batch_log_file, "w", encoding="utf-8") as f:
            json.dump(batch_log, f, ensure_ascii=False, indent=2)
        logger.info(f"Batch log saved to: {batch_log_file}")
        return results_with_ids
    except Exception as e:
        logger.error(f"Error processing batch {batch_num}: {str(e)}", exc_info=True)
        # Return empty results while preserving the ID mapping
        return [
            _make_empty_analysis_result(item, target_lang, schema, error=str(e))
            for item in batch_data
        ]

def analyze_products(
    products: List[Dict[str, str]],
    target_lang: str = "zh",
    batch_size: Optional[int] = None,
    tenant_id: Optional[str] = None,
    analysis_kind: str = "content",
    category_taxonomy_profile: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """
    Library entry point: given the inputs and a target language, return anchor text and the per-dimension information.
    Args:
        products: [{"id": "...", "title": "..."}]
        target_lang: output language
        batch_size: batch size; defaults to the global BATCH_SIZE
    """
    if not API_KEY:
        raise RuntimeError("DASHSCOPE_API_KEY is not set, cannot call LLM")
    if not products:
        return []
    _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    results_by_index: List[Optional[Dict[str, Any]]] = [None] * len(products)
    uncached_items: List[Tuple[int, Dict[str, str]]] = []
    for idx, product in enumerate(products):
        title = str(product.get("title") or "").strip()
        if not title:
            uncached_items.append((idx, product))
            continue
        cached = _get_cached_analysis_result(
            product,
            target_lang,
            analysis_kind,
            category_taxonomy_profile=category_taxonomy_profile,
        )
        if cached:
            logger.info(
                f"[analyze_products] Cache hit for title='{title[:50]}...', "
                f"kind={analysis_kind}, lang={target_lang}"
            )
            results_by_index[idx] = cached
            continue
        uncached_items.append((idx, product))
    if not uncached_items:
        return [item for item in results_by_index if item is not None]
    # call_llm processes at most BATCH_SIZE items per call (default 20):
    # - batch up as much as possible;
    # - even if the caller passes a larger batch_size, it is split down to this cap automatically.
    req_bs = BATCH_SIZE if batch_size is None else int(batch_size)
    bs = max(1, min(req_bs, BATCH_SIZE))
    total_batches = (len(uncached_items) + bs - 1) // bs

    batch_jobs: List[Tuple[int, List[Tuple[int, Dict[str, str]]], List[Dict[str, str]]]] = []
    for i in range(0, len(uncached_items), bs):
        batch_num = i // bs + 1
        batch_slice = uncached_items[i : i + bs]
        batch = [item for _, item in batch_slice]
        batch_jobs.append((batch_num, batch_slice, batch))
    # With a single batch (or a single worker), run serially to avoid thread-pool creation
    # overhead and uncontrolled interleaving of logs / log files
    if total_batches <= 1 or CONTENT_UNDERSTANDING_MAX_WORKERS <= 1:
        for batch_num, batch_slice, batch in batch_jobs:
            logger.info(
                f"[analyze_products] Processing batch {batch_num}/{total_batches}, "
                f"size={len(batch)}, kind={analysis_kind}, target_lang={target_lang}"
            )
            batch_results = process_batch(
                batch,
                batch_num=batch_num,
                target_lang=target_lang,
                analysis_kind=analysis_kind,
                category_taxonomy_profile=category_taxonomy_profile,
            )
            for (original_idx, product), item in zip(batch_slice, batch_results):
                results_by_index[original_idx] = item
                title_input = str(item.get("title_input") or "").strip()
                if not title_input:
                    continue
                if item.get("error"):
                    # Do not cache error results, to avoid amplifying transient failures
                    continue
                try:
                    _set_cached_analysis_result(
                        product,
                        target_lang,
                        item,
                        analysis_kind,
                        category_taxonomy_profile=category_taxonomy_profile,
                    )
                except Exception:
                    # A warning has already been logged internally
                    pass
    else:
        max_workers = min(CONTENT_UNDERSTANDING_MAX_WORKERS, len(batch_jobs))
        logger.info(
            "[analyze_products] Using ThreadPoolExecutor for uncached batches: "
            "max_workers=%s, total_batches=%s, bs=%s, kind=%s, target_lang=%s",
            max_workers,
            total_batches,
            bs,
            analysis_kind,
            target_lang,
        )

        # Only the "LLM call + markdown parsing" runs in threads; Redis get/set stays on the
        # main thread to avoid the extra risk of concurrent writes.
        # Note: the thread pool is a module-level singleton, so max_workers here is mainly for
        # log semantics (actual concurrency is bounded by the singleton pool's limit).
        executor = _get_content_understanding_executor()
        future_by_batch_num: Dict[int, Any] = {}
        for batch_num, _batch_slice, batch in batch_jobs:
            future_by_batch_num[batch_num] = executor.submit(
                process_batch,
                batch,
                batch_num=batch_num,
                target_lang=target_lang,
                analysis_kind=analysis_kind,
                category_taxonomy_profile=category_taxonomy_profile,
            )
        # Backfill by batch_num to keep the output stable (results_by_index is keyed by the original input index)
        for batch_num, batch_slice, _batch in batch_jobs:
            batch_results = future_by_batch_num[batch_num].result()
            for (original_idx, product), item in zip(batch_slice, batch_results):
                results_by_index[original_idx] = item
                title_input = str(item.get("title_input") or "").strip()
                if not title_input:
                    continue
                if item.get("error"):
                    # Do not cache error results, to avoid amplifying transient failures
                    continue
                try:
                    _set_cached_analysis_result(
                        product,
                        target_lang,
                        item,
                        analysis_kind,
                        category_taxonomy_profile=category_taxonomy_profile,
                    )
                except Exception:
                    # A warning has already been logged internally
                    pass

    return [item for item in results_by_index if item is not None]
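# End-to-end usage sketch (hypothetical products; requires DASHSCOPE_API_KEY and, for
# caching, a reachable Redis instance):
#   rows = analyze_products(
#       products=[{"id": "1", "title": "Linen summer dress"}],
#       target_lang="en",
#       analysis_kind="content",
#   )
#   # each row contains "id", "lang", "title_input", the schema result fields
#   # ("title", "category_path", ..., "anchor_text"), and "error" on failure.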