indexer/product_enrich.py

  #!/usr/bin/env python3
  """
  商品内容理解与属性补充模块(product_enrich
  
  提供基于 LLM 的商品锚文本 / 语义属性 / 标签等分析能力,
   indexer  API 在内存中调用(不再负责 CSV 读写)。
  """
  
  import os
  import json
  import logging
  import re
  import time
  import hashlib
  import uuid
  import threading
  from dataclasses import dataclass, field
  from collections import OrderedDict
  from datetime import datetime
  from concurrent.futures import ThreadPoolExecutor
  from typing import List, Dict, Tuple, Any, Optional
  
  import redis
  import requests
  from pathlib import Path
  
  from config.loader import get_app_config
  from config.tenant_config_loader import SOURCE_LANG_CODE_MAP
  from indexer.product_enrich_prompts import (
      SYSTEM_MESSAGE,
      USER_INSTRUCTION_TEMPLATE,
      LANGUAGE_MARKDOWN_TABLE_HEADERS,
      SHARED_ANALYSIS_INSTRUCTION,
      TAXONOMY_LANGUAGE_MARKDOWN_TABLE_HEADERS,
      TAXONOMY_MARKDOWN_TABLE_HEADERS_EN,
      TAXONOMY_SHARED_ANALYSIS_INSTRUCTION,
  )
  
# Configuration
BATCH_SIZE = 20
# Cap on concurrent LLM batch workers for enrich-content (thread pool; only uncached batches run concurrently)
  _APP_CONFIG = get_app_config()
  CONTENT_UNDERSTANDING_MAX_WORKERS = int(_APP_CONFIG.product_enrich.max_workers)
# China North 2 (Beijing): https://dashscope.aliyuncs.com/compatible-mode/v1
# Singapore: https://dashscope-intl.aliyuncs.com/compatible-mode/v1
# US (Virginia): https://dashscope-us.aliyuncs.com/compatible-mode/v1
API_BASE_URL = "https://dashscope-us.aliyuncs.com/compatible-mode/v1"
MODEL_NAME = "qwen-flash"
API_KEY = os.environ.get("DASHSCOPE_API_KEY")
MAX_RETRIES = 3
RETRY_DELAY = 5  # seconds
REQUEST_TIMEOUT = 180  # seconds
  LOGGED_SHARED_CONTEXT_CACHE_SIZE = 256
  PROMPT_INPUT_MIN_ZH_CHARS = 20
  PROMPT_INPUT_MAX_ZH_CHARS = 100
  PROMPT_INPUT_MIN_WORDS = 16
  PROMPT_INPUT_MAX_WORDS = 80
  
# Log paths
OUTPUT_DIR = Path("output_logs")
LOG_DIR = OUTPUT_DIR / "logs"

# Set up dedicated loggers (do not affect the global indexer.log)
  LOG_DIR.mkdir(parents=True, exist_ok=True)
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
  log_file = LOG_DIR / f"product_enrich_{timestamp}.log"
  verbose_log_file = LOG_DIR / "product_enrich_verbose.log"
  _logged_shared_context_keys: "OrderedDict[str, None]" = OrderedDict()
  _logged_shared_context_lock = threading.Lock()
  
  _content_understanding_executor: Optional[ThreadPoolExecutor] = None
  _content_understanding_executor_lock = threading.Lock()
  
  
  def _get_content_understanding_executor() -> ThreadPoolExecutor:
      """
      使用模块级单例线程池,避免同一进程内多次请求叠加创建线程池导致并发失控。
      """
      global _content_understanding_executor
      with _content_understanding_executor_lock:
          if _content_understanding_executor is None:
              _content_understanding_executor = ThreadPoolExecutor(
                  max_workers=CONTENT_UNDERSTANDING_MAX_WORKERS,
                  thread_name_prefix="product-enrich-llm",
              )
          return _content_understanding_executor
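

# Hypothetical sketch (not part of the original module): repeated lookups must
# return the same pool, since the executor is a process-wide singleton.
def _example_executor_is_singleton() -> None:
    assert _get_content_understanding_executor() is _get_content_understanding_executor()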
  
# Main logger: execution flow, batch info, etc.
  logger = logging.getLogger("product_enrich")
  logger.setLevel(logging.INFO)
  
  if not logger.handlers:
      formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
  
      file_handler = logging.FileHandler(log_file, encoding="utf-8")
      file_handler.setFormatter(formatter)
  
      stream_handler = logging.StreamHandler()
      stream_handler.setFormatter(formatter)
  
      logger.addHandler(file_handler)
      logger.addHandler(stream_handler)
  
    # Do not propagate to the root logger, so nothing leaks into logs/indexer.log and other files
      logger.propagate = False
  
# Verbose logger: dedicated to recording LLM requests and responses
  verbose_logger = logging.getLogger("product_enrich_verbose")
  verbose_logger.setLevel(logging.INFO)
  
  if not verbose_logger.handlers:
      verbose_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
      verbose_file_handler = logging.FileHandler(verbose_log_file, encoding="utf-8")
      verbose_file_handler.setFormatter(verbose_formatter)
      verbose_logger.addHandler(verbose_file_handler)
      verbose_logger.propagate = False
  
  logger.info("Verbose LLM logs are written to: %s", verbose_log_file)
  

# Redis cache (for anchors / semantic attributes)
  _REDIS_CONFIG = _APP_CONFIG.infrastructure.redis
  ANCHOR_CACHE_PREFIX = _REDIS_CONFIG.anchor_cache_prefix
  ANCHOR_CACHE_EXPIRE_DAYS = int(_REDIS_CONFIG.anchor_cache_expire_days)
  _anchor_redis: Optional[redis.Redis] = None
  
  try:
      _anchor_redis = redis.Redis(
          host=_REDIS_CONFIG.host,
          port=_REDIS_CONFIG.port,
          password=_REDIS_CONFIG.password,
          decode_responses=True,
          socket_timeout=_REDIS_CONFIG.socket_timeout,
          socket_connect_timeout=_REDIS_CONFIG.socket_connect_timeout,
          retry_on_timeout=_REDIS_CONFIG.retry_on_timeout,
          health_check_interval=10,
      )
      _anchor_redis.ping()
      logger.info("Redis cache initialized for product anchors and semantic attributes")
  except Exception as e:
      logger.warning(f"Failed to initialize Redis for anchors cache: {e}")
      _anchor_redis = None
  
  _missing_prompt_langs = sorted(set(SOURCE_LANG_CODE_MAP) - set(LANGUAGE_MARKDOWN_TABLE_HEADERS))
  if _missing_prompt_langs:
      raise RuntimeError(
          f"Missing product_enrich prompt config for languages: {_missing_prompt_langs}"
      )
  
  
# Multi-value field delimiters: ASCII comma, fullwidth comma, enumeration comma, plus the legacy ; | / and whitespace
  _MULTI_VALUE_FIELD_SPLIT_RE = re.compile(r"[,、,;|/\n\t]+")
  _CORE_INDEX_LANGUAGES = ("zh", "en")
  _DEFAULT_ANALYSIS_KINDS = ("content", "taxonomy")
  _CONTENT_ANALYSIS_ATTRIBUTE_FIELD_MAP = (
      ("tags", "enriched_tags"),
      ("target_audience", "target_audience"),
      ("usage_scene", "usage_scene"),
      ("season", "season"),
      ("key_attributes", "key_attributes"),
      ("material", "material"),
      ("features", "features"),
  )
  _CONTENT_ANALYSIS_RESULT_FIELDS = (
      "title",
      "category_path",
      "tags",
      "target_audience",
      "usage_scene",
      "season",
      "key_attributes",
      "material",
      "features",
      "anchor_text",
  )
  _CONTENT_ANALYSIS_MEANINGFUL_FIELDS = (
      "tags",
      "target_audience",
      "usage_scene",
      "season",
      "key_attributes",
      "material",
      "features",
      "anchor_text",
  )
  _CONTENT_ANALYSIS_FIELD_ALIASES = {
      "tags": ("tags", "enriched_tags"),
  }
  _CONTENT_ANALYSIS_QUALITY_FIELDS = ("title", "category_path", "anchor_text")
  _TAXONOMY_ANALYSIS_ATTRIBUTE_FIELD_MAP = (
      ("product_type", "Product Type"),
      ("target_gender", "Target Gender"),
      ("age_group", "Age Group"),
      ("season", "Season"),
      ("fit", "Fit"),
      ("silhouette", "Silhouette"),
      ("neckline", "Neckline"),
      ("sleeve_length_type", "Sleeve Length Type"),
      ("sleeve_style", "Sleeve Style"),
      ("strap_type", "Strap Type"),
      ("rise_waistline", "Rise / Waistline"),
      ("leg_shape", "Leg Shape"),
      ("skirt_shape", "Skirt Shape"),
      ("length_type", "Length Type"),
      ("closure_type", "Closure Type"),
      ("design_details", "Design Details"),
      ("fabric", "Fabric"),
      ("material_composition", "Material Composition"),
      ("fabric_properties", "Fabric Properties"),
      ("clothing_features", "Clothing Features"),
      ("functional_benefits", "Functional Benefits"),
      ("color", "Color"),
      ("color_family", "Color Family"),
      ("print_pattern", "Print / Pattern"),
      ("occasion_end_use", "Occasion / End Use"),
      ("style_aesthetic", "Style Aesthetic"),
  )
  _TAXONOMY_ANALYSIS_RESULT_FIELDS = tuple(
      field_name for field_name, _ in _TAXONOMY_ANALYSIS_ATTRIBUTE_FIELD_MAP
  )
  
  
  @dataclass(frozen=True)
  class AnalysisSchema:
      name: str
      shared_instruction: str
      markdown_table_headers: Dict[str, List[str]]
      result_fields: Tuple[str, ...]
      meaningful_fields: Tuple[str, ...]
      cache_version: str = "v1"
      field_aliases: Dict[str, Tuple[str, ...]] = field(default_factory=dict)
      fallback_headers: Optional[List[str]] = None
      quality_fields: Tuple[str, ...] = ()
  
      def get_headers(self, target_lang: str) -> Optional[List[str]]:
          headers = self.markdown_table_headers.get(target_lang)
          if headers:
              return headers
          if self.fallback_headers:
              return self.fallback_headers
          return None
  
  
  _ANALYSIS_SCHEMAS: Dict[str, AnalysisSchema] = {
      "content": AnalysisSchema(
          name="content",
          shared_instruction=SHARED_ANALYSIS_INSTRUCTION,
          markdown_table_headers=LANGUAGE_MARKDOWN_TABLE_HEADERS,
          result_fields=_CONTENT_ANALYSIS_RESULT_FIELDS,
          meaningful_fields=_CONTENT_ANALYSIS_MEANINGFUL_FIELDS,
          cache_version="v2",
          field_aliases=_CONTENT_ANALYSIS_FIELD_ALIASES,
          quality_fields=_CONTENT_ANALYSIS_QUALITY_FIELDS,
      ),
      "taxonomy": AnalysisSchema(
          name="taxonomy",
          shared_instruction=TAXONOMY_SHARED_ANALYSIS_INSTRUCTION,
          markdown_table_headers=TAXONOMY_LANGUAGE_MARKDOWN_TABLE_HEADERS,
          result_fields=_TAXONOMY_ANALYSIS_RESULT_FIELDS,
          meaningful_fields=_TAXONOMY_ANALYSIS_RESULT_FIELDS,
          cache_version="v1",
          fallback_headers=TAXONOMY_MARKDOWN_TABLE_HEADERS_EN,
      ),
  }
  
  
  def _get_analysis_schema(analysis_kind: str) -> AnalysisSchema:
      schema = _ANALYSIS_SCHEMAS.get(analysis_kind)
      if schema is None:
          raise ValueError(f"Unsupported analysis_kind: {analysis_kind}")
      return schema
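

# Hypothetical sketch (not part of the original module): schemas are looked up
# by analysis kind, and unknown kinds fail fast with ValueError.
def _example_get_analysis_schema() -> None:
    assert _get_analysis_schema("content").name == "content"
    assert _get_analysis_schema("taxonomy").fallback_headers is not None
    try:
        _get_analysis_schema("unknown-kind")
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError for an unsupported kind")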
  
  
  def _normalize_analysis_kinds(
      analysis_kinds: Optional[List[str]] = None,
  ) -> Tuple[str, ...]:
      requested = _DEFAULT_ANALYSIS_KINDS if not analysis_kinds else tuple(analysis_kinds)
      normalized: List[str] = []
      seen = set()
      for analysis_kind in requested:
          schema = _get_analysis_schema(str(analysis_kind).strip())
          if schema.name in seen:
              continue
          seen.add(schema.name)
          normalized.append(schema.name)
      return tuple(normalized)
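

# Hypothetical sketch (not part of the original module): kinds default to both
# analyses, and duplicates are dropped while order is preserved.
def _example_normalize_analysis_kinds() -> None:
    assert _normalize_analysis_kinds(None) == ("content", "taxonomy")
    assert _normalize_analysis_kinds(["taxonomy", "taxonomy", "content"]) == ("taxonomy", "content")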
  
  
def split_multi_value_field(text: Optional[str]) -> List[str]:
    """Split a multi-value string (from the LLM or business side) into a list of phrases (stripped, empties dropped)."""
      if text is None:
          return []
      s = str(text).strip()
      if not s:
          return []
      return [p.strip() for p in _MULTI_VALUE_FIELD_SPLIT_RE.split(s) if p.strip()]
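

# Hypothetical sketch (not part of the original module): the sample strings are
# invented to show delimiter handling and the empty-input behavior.
def _example_split_multi_value_field() -> None:
    assert split_multi_value_field("red, blue,green") == ["red", "blue", "green"]
    assert split_multi_value_field(None) == []
    assert split_multi_value_field("   ") == []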
  
  
  def _append_lang_phrase_map(target: Dict[str, List[str]], lang: str, raw_value: Any) -> None:
      parts = split_multi_value_field(raw_value)
      if not parts:
          return
      existing = target.get(lang) or []
      merged = list(dict.fromkeys([str(x).strip() for x in existing if str(x).strip()] + parts))
      if merged:
          target[lang] = merged
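

# Hypothetical sketch (not part of the original module): merging deduplicates
# phrases while preserving the order of first appearance.
def _example_append_lang_phrase_map() -> None:
    target: Dict[str, List[str]] = {"en": ["red"]}
    _append_lang_phrase_map(target, lang="en", raw_value="red, blue")
    assert target == {"en": ["red", "blue"]}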
  
  
  def _get_or_create_named_value_entry(
      target: List[Dict[str, Any]],
      name: str,
      *,
      default_value: Optional[Dict[str, Any]] = None,
  ) -> Dict[str, Any]:
      for item in target:
          if item.get("name") == name:
              value = item.get("value")
              if isinstance(value, dict):
                  return item
              break
  
      entry = {"name": name, "value": default_value or {}}
      target.append(entry)
      return entry
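

# Hypothetical sketch (not part of the original module): entries are keyed by
# name, so a second lookup reuses the first entry instead of appending another.
def _example_named_value_entry() -> None:
    attrs: List[Dict[str, Any]] = []
    first = _get_or_create_named_value_entry(attrs, "Color")
    second = _get_or_create_named_value_entry(attrs, "Color")
    assert first is second and len(attrs) == 1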
  
  
  def _append_named_lang_phrase_map(
      target: List[Dict[str, Any]],
      name: str,
      lang: str,
      raw_value: Any,
  ) -> None:
      entry = _get_or_create_named_value_entry(target, name=name, default_value={})
      _append_lang_phrase_map(entry["value"], lang=lang, raw_value=raw_value)
  
  
  def _get_product_id(product: Dict[str, Any]) -> str:
      return str(product.get("id") or product.get("spu_id") or "").strip()
  
  
  def _get_analysis_field_aliases(field_name: str, schema: AnalysisSchema) -> Tuple[str, ...]:
      return schema.field_aliases.get(field_name, (field_name,))
  
  
  def _get_analysis_field_value(row: Dict[str, Any], field_name: str, schema: AnalysisSchema) -> Any:
      for alias in _get_analysis_field_aliases(field_name, schema):
          if alias in row:
              return row.get(alias)
      return None
  
  
  def _has_meaningful_value(value: Any) -> bool:
      if value is None:
          return False
      if isinstance(value, str):
          return bool(value.strip())
      if isinstance(value, dict):
          return any(_has_meaningful_value(v) for v in value.values())
      if isinstance(value, list):
          return any(_has_meaningful_value(v) for v in value)
      return bool(value)
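

# Hypothetical sketch (not part of the original module): "meaningful" means a
# non-blank leaf exists somewhere in the nested structure.
def _example_has_meaningful_value() -> None:
    assert not _has_meaningful_value("   ")
    assert not _has_meaningful_value({"en": []})
    assert _has_meaningful_value(["", {"zh": "红色"}])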
  
  
  def _make_empty_analysis_result(
      product: Dict[str, Any],
      target_lang: str,
      schema: AnalysisSchema,
      error: Optional[str] = None,
  ) -> Dict[str, Any]:
      result = {
          "id": _get_product_id(product),
          "lang": target_lang,
          "title_input": str(product.get("title") or "").strip(),
      }
      for field in schema.result_fields:
          result[field] = ""
      if error:
          result["error"] = error
      return result
  
  
  def _normalize_analysis_result(
      result: Dict[str, Any],
      product: Dict[str, Any],
      target_lang: str,
      schema: AnalysisSchema,
  ) -> Dict[str, Any]:
      normalized = _make_empty_analysis_result(product, target_lang, schema)
      if not isinstance(result, dict):
          return normalized
  
      normalized["lang"] = str(result.get("lang") or target_lang).strip() or target_lang
      normalized["title_input"] = str(
          product.get("title") or result.get("title_input") or ""
      ).strip()
  
      for field in schema.result_fields:
          normalized[field] = str(_get_analysis_field_value(result, field, schema) or "").strip()
  
      if result.get("error"):
          normalized["error"] = str(result.get("error"))
      return normalized
  
  
  def _has_meaningful_analysis_content(result: Dict[str, Any], schema: AnalysisSchema) -> bool:
      return any(_has_meaningful_value(result.get(field)) for field in schema.meaningful_fields)
  
  
  def _append_analysis_attributes(
      target: List[Dict[str, Any]],
      row: Dict[str, Any],
      lang: str,
      schema: AnalysisSchema,
      field_map: Tuple[Tuple[str, str], ...],
  ) -> None:
      for source_name, output_name in field_map:
          raw = _get_analysis_field_value(row, source_name, schema)
          if not raw:
              continue
          _append_named_lang_phrase_map(
              target,
              name=output_name,
              lang=lang,
              raw_value=raw,
          )
  
  
  def _apply_index_content_row(result: Dict[str, Any], row: Dict[str, Any], lang: str) -> None:
      if not row or row.get("error"):
          return
  
      content_schema = _get_analysis_schema("content")
      anchor_text = str(_get_analysis_field_value(row, "anchor_text", content_schema) or "").strip()
      if anchor_text:
          _append_lang_phrase_map(result["qanchors"], lang=lang, raw_value=anchor_text)
  
      for source_name, output_name in _CONTENT_ANALYSIS_ATTRIBUTE_FIELD_MAP:
          raw = _get_analysis_field_value(row, source_name, content_schema)
          if not raw:
              continue
          _append_named_lang_phrase_map(
              result["enriched_attributes"],
              name=output_name,
              lang=lang,
              raw_value=raw,
          )
          if output_name == "enriched_tags":
              _append_lang_phrase_map(result["enriched_tags"], lang=lang, raw_value=raw)
  
  
  def _apply_index_taxonomy_row(result: Dict[str, Any], row: Dict[str, Any], lang: str) -> None:
      if not row or row.get("error"):
          return
  
      _append_analysis_attributes(
          result["enriched_taxonomy_attributes"],
          row=row,
          lang=lang,
          schema=_get_analysis_schema("taxonomy"),
          field_map=_TAXONOMY_ANALYSIS_ATTRIBUTE_FIELD_MAP,
      )
  
  
  def _normalize_index_content_item(item: Dict[str, Any]) -> Dict[str, str]:
      item_id = _get_product_id(item)
      return {
          "id": item_id,
          "title": str(item.get("title") or "").strip(),
          "brief": str(item.get("brief") or "").strip(),
          "description": str(item.get("description") or "").strip(),
          "image_url": str(item.get("image_url") or "").strip(),
      }
  
  
  def build_index_content_fields(
      items: List[Dict[str, Any]],
      tenant_id: Optional[str] = None,
      analysis_kinds: Optional[List[str]] = None,
  ) -> List[Dict[str, Any]]:
      """
      高层入口:生成与 ES mapping 对齐的内容理解字段。
  
      输入项需包含:
      - `id`  `spu_id`
      - `title`
      - 可选 `brief` / `description` / `image_url`
5aaf0c7d   tangwang   feat(indexer): 完善...
487
      - 可选 `analysis_kinds`,默认同时执行 `content`  `taxonomy`
d350861f   tangwang   索引结构修改
488
489
490
491
492
493
  
      返回项结构:
      - `id`
      - `qanchors`
      - `enriched_tags`
      - `enriched_attributes`
36516857   tangwang   feat(product_enri...
494
      - `enriched_taxonomy_attributes`
d350861f   tangwang   索引结构修改
495
496
497
498
499
500
      - 可选 `error`
  
      其中:
      - `qanchors.{lang}` 为短语数组
      - `enriched_tags.{lang}` 为标签数组
      """
      requested_analysis_kinds = _normalize_analysis_kinds(analysis_kinds)
      normalized_items = [_normalize_index_content_item(item) for item in items]
      if not normalized_items:
          return []
  
      results_by_id: Dict[str, Dict[str, Any]] = {
          item["id"]: {
              "id": item["id"],
              "qanchors": {},
              "enriched_tags": {},
              "enriched_attributes": [],
              "enriched_taxonomy_attributes": [],
          }
          for item in normalized_items
      }
  
      for lang in _CORE_INDEX_LANGUAGES:
          if "content" in requested_analysis_kinds:
              try:
                  rows = analyze_products(
                      products=normalized_items,
                      target_lang=lang,
                      batch_size=BATCH_SIZE,
                      tenant_id=tenant_id,
                      analysis_kind="content",
                  )
              except Exception as e:
                  logger.warning("build_index_content_fields content enrichment failed for lang=%s: %s", lang, e)
                  for item in normalized_items:
                      results_by_id[item["id"]].setdefault("error", str(e))
                  continue
  
              for row in rows or []:
                  item_id = str(row.get("id") or "").strip()
                  if not item_id or item_id not in results_by_id:
                      continue
                  if row.get("error"):
                      results_by_id[item_id].setdefault("error", row["error"])
                      continue
                  _apply_index_content_row(results_by_id[item_id], row=row, lang=lang)
  
          if "taxonomy" in requested_analysis_kinds:
              try:
                  taxonomy_rows = analyze_products(
                      products=normalized_items,
                      target_lang=lang,
                      batch_size=BATCH_SIZE,
                      tenant_id=tenant_id,
                      analysis_kind="taxonomy",
                  )
              except Exception as e:
                  logger.warning(
                      "build_index_content_fields taxonomy enrichment failed for lang=%s: %s",
                      lang,
                      e,
                  )
                  for item in normalized_items:
                      results_by_id[item["id"]].setdefault("error", str(e))
                  continue
  
              for row in taxonomy_rows or []:
                  item_id = str(row.get("id") or "").strip()
                  if not item_id or item_id not in results_by_id:
                      continue
                  if row.get("error"):
                      results_by_id[item_id].setdefault("error", row["error"])
                      continue
                  _apply_index_taxonomy_row(results_by_id[item_id], row=row, lang=lang)
  
      return [results_by_id[item["id"]] for item in normalized_items]
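

# Hypothetical usage sketch (not part of the original module). The item and
# tenant_id are invented, and actually running this needs DASHSCOPE_API_KEY
# plus network access, since it triggers real LLM calls.
def _example_build_index_content_fields() -> List[Dict[str, Any]]:
    items = [
        {
            "id": "sku-001",
            "title": "Slim fit crew neck cotton t-shirt",
            "brief": "Soft breathable summer basic",
        }
    ]
    results = build_index_content_fields(items, tenant_id="demo", analysis_kinds=["content"])
    # Each result carries per-language maps, e.g. results[0]["qanchors"].get("en")
    return results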
  
  
  def _normalize_space(text: str) -> str:
      return re.sub(r"\s+", " ", (text or "").strip())
  
  
  def _contains_cjk(text: str) -> bool:
      return bool(re.search(r"[\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff]", text or ""))
  
  
  def _truncate_by_chars(text: str, max_chars: int) -> str:
      return text[:max_chars].strip()
  
  
  def _truncate_by_words(text: str, max_words: int) -> str:
      words = re.findall(r"\S+", text or "")
      return " ".join(words[:max_words]).strip()
  
  
  def _detect_prompt_input_lang(text: str) -> str:
    # Simplified: treat text containing CJK as Chinese-like; otherwise treat it as a space-delimited language.
      return "zh" if _contains_cjk(text) else "en"
  
  
  def _build_prompt_input_text(product: Dict[str, Any]) -> str:
      """
      生成真正送入 prompt 的商品文本。
  
      规则:
      - 默认使用 title
      - 若文本过短,则依次补 brief / description
      - 若文本过长,则按语言粗粒度截断
      """
      fields = [
          _normalize_space(str(product.get("title") or "")),
          _normalize_space(str(product.get("brief") or "")),
          _normalize_space(str(product.get("description") or "")),
      ]
      parts: List[str] = []
  
      def join_parts() -> str:
          return " | ".join(part for part in parts if part).strip()
  
      for field in fields:
          if not field:
              continue
          if field not in parts:
              parts.append(field)
          candidate = join_parts()
          if _detect_prompt_input_lang(candidate) == "zh":
              if len(candidate) >= PROMPT_INPUT_MIN_ZH_CHARS:
                  return _truncate_by_chars(candidate, PROMPT_INPUT_MAX_ZH_CHARS)
          else:
              if len(re.findall(r"\S+", candidate)) >= PROMPT_INPUT_MIN_WORDS:
                  return _truncate_by_words(candidate, PROMPT_INPUT_MAX_WORDS)
  
      candidate = join_parts()
      if not candidate:
          return ""
      if _detect_prompt_input_lang(candidate) == "zh":
          return _truncate_by_chars(candidate, PROMPT_INPUT_MAX_ZH_CHARS)
      return _truncate_by_words(candidate, PROMPT_INPUT_MAX_WORDS)
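

# Hypothetical sketch (not part of the original module): the sample Chinese
# title alone is shorter than PROMPT_INPUT_MIN_ZH_CHARS, so the brief is
# appended before the text is returned.
def _example_build_prompt_input_text() -> None:
    product = {
        "title": "红色连衣裙",
        "brief": "夏季新款雪纺中长款红色连衣裙女韩版修身显瘦",
    }
    assert _build_prompt_input_text(product) == (
        "红色连衣裙 | 夏季新款雪纺中长款红色连衣裙女韩版修身显瘦"
    )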
  
  
  def _make_analysis_cache_key(
      product: Dict[str, Any],
      target_lang: str,
      analysis_kind: str,
) -> str:
    """Build the cache key; it depends only on the analysis kind, the actual prompt input text, and the target language."""
      schema = _get_analysis_schema(analysis_kind)
      prompt_input = _build_prompt_input_text(product)
      h = hashlib.md5(prompt_input.encode("utf-8")).hexdigest()
      prompt_contract = {
          "schema_name": schema.name,
          "cache_version": schema.cache_version,
          "system_message": SYSTEM_MESSAGE,
          "user_instruction_template": USER_INSTRUCTION_TEMPLATE,
          "shared_instruction": schema.shared_instruction,
          "assistant_headers": schema.get_headers(target_lang),
          "result_fields": schema.result_fields,
          "meaningful_fields": schema.meaningful_fields,
          "field_aliases": schema.field_aliases,
      }
      prompt_contract_hash = hashlib.md5(
          json.dumps(prompt_contract, ensure_ascii=False, sort_keys=True).encode("utf-8")
      ).hexdigest()[:12]
      return (
          f"{ANCHOR_CACHE_PREFIX}:{analysis_kind}:{prompt_contract_hash}:"
          f"{target_lang}:{prompt_input[:4]}{h}"
      )
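

# Hypothetical sketch (not part of the original module): the key embeds the
# analysis kind, a prompt-contract hash, the target language, and a content
# hash, so any change to the prompt wiring invalidates old cache entries.
def _example_analysis_cache_key() -> None:
    key = _make_analysis_cache_key({"title": "Blue denim jacket"}, "en", "content")
    assert key.startswith(f"{ANCHOR_CACHE_PREFIX}:content:")
    assert ":en:" in key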
  
  
  def _make_anchor_cache_key(
      product: Dict[str, Any],
      target_lang: str,
  ) -> str:
      return _make_analysis_cache_key(product, target_lang, analysis_kind="content")
  
  
  def _get_cached_analysis_result(
      product: Dict[str, Any],
      target_lang: str,
      analysis_kind: str,
  ) -> Optional[Dict[str, Any]]:
      if not _anchor_redis:
          return None
      schema = _get_analysis_schema(analysis_kind)
      try:
          key = _make_analysis_cache_key(product, target_lang, analysis_kind)
          raw = _anchor_redis.get(key)
          if not raw:
              return None
          result = _normalize_analysis_result(
              json.loads(raw),
              product=product,
              target_lang=target_lang,
              schema=schema,
          )
          if not _has_meaningful_analysis_content(result, schema):
              return None
          return result
      except Exception as e:
          logger.warning("Failed to get %s analysis cache: %s", analysis_kind, e)
          return None
  
  
  def _get_cached_anchor_result(
      product: Dict[str, Any],
      target_lang: str,
  ) -> Optional[Dict[str, Any]]:
      return _get_cached_analysis_result(product, target_lang, analysis_kind="content")
  
  
  def _set_cached_analysis_result(
      product: Dict[str, Any],
      target_lang: str,
      result: Dict[str, Any],
      analysis_kind: str,
  ) -> None:
      if not _anchor_redis:
          return
      schema = _get_analysis_schema(analysis_kind)
      try:
          normalized = _normalize_analysis_result(
              result,
              product=product,
              target_lang=target_lang,
              schema=schema,
          )
          if not _has_meaningful_analysis_content(normalized, schema):
              return
          key = _make_analysis_cache_key(product, target_lang, analysis_kind)
          ttl = ANCHOR_CACHE_EXPIRE_DAYS * 24 * 3600
          _anchor_redis.setex(key, ttl, json.dumps(normalized, ensure_ascii=False))
      except Exception as e:
          logger.warning("Failed to set %s analysis cache: %s", analysis_kind, e)
  
  
  def _set_cached_anchor_result(
      product: Dict[str, Any],
      target_lang: str,
      result: Dict[str, Any],
  ) -> None:
      _set_cached_analysis_result(product, target_lang, result, analysis_kind="content")
  
  
  def _build_assistant_prefix(headers: List[str]) -> str:
      header_line = "| " + " | ".join(headers) + " |"
      separator_line = "|" + "----|" * len(headers)
      return f"{header_line}\n{separator_line}\n"
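

# Hypothetical sketch (not part of the original module): the prefix is the
# markdown header plus its separator row, ready to seed Partial Mode.
def _example_build_assistant_prefix() -> None:
    assert _build_assistant_prefix(["No.", "Title"]) == "| No. | Title |\n|----|----|\n"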
  
  
  def _build_shared_context(products: List[Dict[str, str]], schema: AnalysisSchema) -> str:
      shared_context = schema.shared_instruction
      for idx, product in enumerate(products, 1):
          prompt_input = _build_prompt_input_text(product)
          shared_context += f"{idx}. {prompt_input}\n"
      return shared_context
  
  
  def _hash_text(text: str) -> str:
      return hashlib.md5((text or "").encode("utf-8")).hexdigest()[:12]
  
  
  def _mark_shared_context_logged_once(shared_context_key: str) -> bool:
      with _logged_shared_context_lock:
          if shared_context_key in _logged_shared_context_keys:
              _logged_shared_context_keys.move_to_end(shared_context_key)
              return False
  
          _logged_shared_context_keys[shared_context_key] = None
          if len(_logged_shared_context_keys) > LOGGED_SHARED_CONTEXT_CACHE_SIZE:
              _logged_shared_context_keys.popitem(last=False)
          return True
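

# Hypothetical sketch (not part of the original module): only the first
# sighting of a shared-context key reports True; later ones are suppressed.
def _example_mark_shared_context_logged_once() -> None:
    reset_logged_shared_context_keys()
    assert _mark_shared_context_logged_once("demo-key") is True
    assert _mark_shared_context_logged_once("demo-key") is False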
  
  
def reset_logged_shared_context_keys() -> None:
    """Test helper: clear the recorded shared-context keys."""
      with _logged_shared_context_lock:
          _logged_shared_context_keys.clear()
  
  
  def create_prompt(
      products: List[Dict[str, str]],
      target_lang: str = "zh",
      analysis_kind: str = "content",
  ) -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """Build the shared context, localized output requirements, and the Partial Mode assistant prefix for the target language."""
      schema = _get_analysis_schema(analysis_kind)
      markdown_table_headers = schema.get_headers(target_lang)
      if not markdown_table_headers:
          logger.warning(
              "Unsupported target_lang for markdown table headers: kind=%s lang=%s",
              analysis_kind,
              target_lang,
          )
          return None, None, None
      shared_context = _build_shared_context(products, schema)
      language_label = SOURCE_LANG_CODE_MAP.get(target_lang, target_lang)
      user_prompt = USER_INSTRUCTION_TEMPLATE.format(language=language_label).strip()
      assistant_prefix = _build_assistant_prefix(markdown_table_headers)
      return shared_context, user_prompt, assistant_prefix
  
  
  def _merge_partial_response(assistant_prefix: str, generated_content: str) -> str:
      """将 Partial Mode 的 assistant 前缀与补全文本拼成完整 markdown。"""
      generated = (generated_content or "").lstrip()
      prefix_lines = [line.strip() for line in assistant_prefix.strip().splitlines()]
      generated_lines = generated.splitlines()
  
      if generated_lines:
          first_line = generated_lines[0].strip()
          if prefix_lines and first_line == prefix_lines[0]:
              generated_lines = generated_lines[1:]
              if generated_lines and len(prefix_lines) > 1 and generated_lines[0].strip() == prefix_lines[1]:
                  generated_lines = generated_lines[1:]
          elif len(prefix_lines) > 1 and first_line == prefix_lines[1]:
              generated_lines = generated_lines[1:]
  
      suffix = "\n".join(generated_lines).lstrip("\n")
      if suffix:
          return f"{assistant_prefix}{suffix}"
      return assistant_prefix
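

# Hypothetical sketch (not part of the original module): if the model echoes
# the header and/or separator rows, the duplicates are dropped before the
# prefix is re-attached.
def _example_merge_partial_response() -> None:
    prefix = "| No. | Title |\n|----|----|\n"
    assert _merge_partial_response(prefix, "| 1 | Tee |") == prefix + "| 1 | Tee |"
    echoed = "| No. | Title |\n|----|----|\n| 1 | Tee |"
    assert _merge_partial_response(prefix, echoed) == prefix + "| 1 | Tee |"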
  
  
  def call_llm(
      shared_context: str,
      user_prompt: str,
      assistant_prefix: str,
      target_lang: str = "zh",
      analysis_kind: str = "content",
) -> Tuple[str, str]:
    """Call the LLM API (with retries), using Partial Mode to force the markdown table prefix."""
      headers = {
          "Authorization": f"Bearer {API_KEY}",
          "Content-Type": "application/json",
      }
      shared_context_key = _hash_text(shared_context)
      localized_tail_key = _hash_text(f"{target_lang}\n{user_prompt}\n{assistant_prefix}")
      combined_user_prompt = f"{shared_context.rstrip()}\n\n{user_prompt.strip()}"
  
      payload = {
          "model": MODEL_NAME,
          "messages": [
              {
                  "role": "system",
                  "content": SYSTEM_MESSAGE,
              },
              {
                  "role": "user",
                  "content": combined_user_prompt,
              },
              {
                  "role": "assistant",
                  "content": assistant_prefix,
                  "partial": True,
              },
          ],
          "temperature": 0.3,
          "top_p": 0.8,
      }
  
      request_data = {
          "headers": {k: v for k, v in headers.items() if k != "Authorization"},
          "payload": payload,
      }
  
      if _mark_shared_context_logged_once(shared_context_key):
          logger.info(f"\n{'=' * 80}")
          logger.info(
              "LLM Shared Context [model=%s, kind=%s, shared_key=%s, chars=%s] (logged once per process key)",
              MODEL_NAME,
              analysis_kind,
              shared_context_key,
              len(shared_context),
          )
          logger.info("\nSystem Message:\n%s", SYSTEM_MESSAGE)
          logger.info("\nShared Context:\n%s", shared_context)
  
      verbose_logger.info(f"\n{'=' * 80}")
      verbose_logger.info(
          "LLM Request [model=%s, kind=%s, lang=%s, shared_key=%s, tail_key=%s]:",
          MODEL_NAME,
          analysis_kind,
          target_lang,
          shared_context_key,
          localized_tail_key,
      )
      verbose_logger.info(json.dumps(request_data, ensure_ascii=False, indent=2))
      verbose_logger.info(f"\nCombined User Prompt:\n{combined_user_prompt}")
      verbose_logger.info(f"\nShared Context:\n{shared_context}")
      verbose_logger.info(f"\nLocalized Requirement:\n{user_prompt}")
      verbose_logger.info(f"\nAssistant Prefix:\n{assistant_prefix}")
  
      logger.info(
          "\nLLM Request Variant [kind=%s, lang=%s, shared_key=%s, tail_key=%s, prompt_chars=%s, prefix_chars=%s]",
          analysis_kind,
          target_lang,
          shared_context_key,
          localized_tail_key,
          len(user_prompt),
          len(assistant_prefix),
      )
      logger.info("\nLocalized Requirement:\n%s", user_prompt)
      logger.info("\nAssistant Prefix:\n%s", assistant_prefix)
  
    # Create a session with proxies disabled
    session = requests.Session()
    session.trust_env = False  # ignore system proxy settings
  
      try:
        # retry mechanism
          for attempt in range(MAX_RETRIES):
              try:
                  response = session.post(
                      f"{API_BASE_URL}/chat/completions",
                      headers=headers,
                      json=payload,
                      timeout=REQUEST_TIMEOUT,
                    proxies={"http": None, "https": None},  # explicitly disable proxies
                  )
  
                  response.raise_for_status()
                  result = response.json()
                  usage = result.get("usage") or {}
  
                  verbose_logger.info(
                      "\nLLM Response [model=%s, kind=%s, lang=%s, shared_key=%s, tail_key=%s]:",
                      MODEL_NAME,
                      analysis_kind,
                      target_lang,
                      shared_context_key,
                      localized_tail_key,
                  )
                  verbose_logger.info(json.dumps(result, ensure_ascii=False, indent=2))
  
                  generated_content = result["choices"][0]["message"]["content"]
                  full_markdown = _merge_partial_response(assistant_prefix, generated_content)
  
                  logger.info(
                      "\nLLM Response Summary [kind=%s, lang=%s, shared_key=%s, tail_key=%s, generated_chars=%s, completion_tokens=%s, prompt_tokens=%s, total_tokens=%s]",
                      analysis_kind,
                      target_lang,
                      shared_context_key,
                      localized_tail_key,
                      len(generated_content or ""),
                      usage.get("completion_tokens"),
                      usage.get("prompt_tokens"),
                      usage.get("total_tokens"),
                  )
                  logger.info("\nGenerated Content:\n%s", generated_content)
                  logger.info("\nMerged Markdown:\n%s", full_markdown)
  
                  verbose_logger.info(f"\nGenerated Content:\n{generated_content}")
                  verbose_logger.info(f"\nMerged Markdown:\n{full_markdown}")
  
                  return full_markdown, json.dumps(result, ensure_ascii=False)
  
              except requests.exceptions.ProxyError as e:
                  logger.warning(f"Attempt {attempt + 1}/{MAX_RETRIES}: Proxy error - {str(e)}")
                  if attempt < MAX_RETRIES - 1:
                      logger.info(f"Retrying in {RETRY_DELAY} seconds...")
                      time.sleep(RETRY_DELAY)
                  else:
                      raise
  
              except requests.exceptions.RequestException as e:
                  logger.warning(f"Attempt {attempt + 1}/{MAX_RETRIES}: Request error - {str(e)}")
                  if attempt < MAX_RETRIES - 1:
                      logger.info(f"Retrying in {RETRY_DELAY} seconds...")
                      time.sleep(RETRY_DELAY)
                  else:
                      raise
  
              except Exception as e:
                  logger.error(f"Unexpected error on attempt {attempt + 1}/{MAX_RETRIES}: {str(e)}")
                  if attempt < MAX_RETRIES - 1:
                      logger.info(f"Retrying in {RETRY_DELAY} seconds...")
                      time.sleep(RETRY_DELAY)
                  else:
                      raise
  
      finally:
          session.close()
  
  
36516857   tangwang   feat(product_enri...
978
979
980
981
  def parse_markdown_table(
      markdown_content: str,
      analysis_kind: str = "content",
  ) -> List[Dict[str, str]]:
    """Parse the markdown table content."""
      schema = _get_analysis_schema(analysis_kind)
      lines = markdown_content.strip().split("\n")
      data = []
      data_started = False
  
      for line in lines:
          line = line.strip()
          if not line:
              continue
  
        # table row handling
        if line.startswith("|"):
            # separator row (---- or :---: etc.; spaces allowed, e.g. "| ---- | ---- |")
            sep_chars = line.replace("|", "").strip().replace(" ", "")
            if sep_chars and set(sep_chars) <= {"-", ":"}:
                data_started = True
                continue

            # first header row: skip it regardless of language
            if not data_started:
                # wait for the next data row
                continue

            # parse a data row
            parts = [p.strip() for p in line.split("|")]
              if parts and parts[0] == "":
                  parts = parts[1:]
              if parts and parts[-1] == "":
                  parts = parts[:-1]
  
              if len(parts) >= 2:
                  row = {"seq_no": parts[0]}
                  for field_index, field_name in enumerate(schema.result_fields, start=1):
                      row[field_name] = parts[field_index] if len(parts) > field_index else ""
                  data.append(row)
  
      return data
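

# Hypothetical sketch (not part of the original module): data rows start after
# the separator line, and missing trailing columns become empty strings.
def _example_parse_markdown_table() -> None:
    rows = parse_markdown_table("| No. | Title |\n|----|----|\n| 1 | Blue Tee |", analysis_kind="content")
    assert rows[0]["seq_no"] == "1"
    assert rows[0]["title"] == "Blue Tee"
    assert rows[0]["anchor_text"] == ""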
  
  
  def _log_parsed_result_quality(
      batch_data: List[Dict[str, str]],
      parsed_results: List[Dict[str, str]],
      target_lang: str,
      batch_num: int,
      analysis_kind: str,
  ) -> None:
      schema = _get_analysis_schema(analysis_kind)
      expected = len(batch_data)
      actual = len(parsed_results)
      if actual != expected:
          logger.warning(
              "Parsed row count mismatch for kind=%s batch=%s lang=%s: expected=%s actual=%s",
              analysis_kind,
              batch_num,
              target_lang,
              expected,
              actual,
          )
  
      if not schema.quality_fields:
          logger.info(
              "Parsed Quality Summary [kind=%s, batch=%s, lang=%s]: rows=%s/%s",
              analysis_kind,
              batch_num,
              target_lang,
              actual,
              expected,
          )
          return
  
      missing_summary = ", ".join(
          f"missing_{field}="
          f"{sum(1 for item in parsed_results if not str(item.get(field) or '').strip())}"
          for field in schema.quality_fields
      )
      logger.info(
          "Parsed Quality Summary [kind=%s, batch=%s, lang=%s]: rows=%s/%s, %s",
          analysis_kind,
          batch_num,
          target_lang,
          actual,
          expected,
          missing_summary,
      )
  
  
  def process_batch(
      batch_data: List[Dict[str, str]],
      batch_num: int,
      target_lang: str = "zh",
      analysis_kind: str = "content",
  ) -> List[Dict[str, Any]]:
    """Process one batch of data."""
      schema = _get_analysis_schema(analysis_kind)
      logger.info(f"\n{'#' * 80}")
      logger.info(
          "Processing Batch %s (%s items, kind=%s)",
          batch_num,
          len(batch_data),
          analysis_kind,
      )

    # build the prompt
      shared_context, user_prompt, assistant_prefix = create_prompt(
          batch_data,
          target_lang=target_lang,
          analysis_kind=analysis_kind,
      )
  
    # If prompt creation failed (e.g. unsupported target_lang), fail the whole batch without calling the LLM
      if shared_context is None or user_prompt is None or assistant_prefix is None:
          logger.error(
              "Failed to create prompt for batch %s, kind=%s, target_lang=%s; "
              "marking entire batch as failed without calling LLM",
              batch_num,
              analysis_kind,
              target_lang,
          )
          return [
              _make_empty_analysis_result(
                  item,
                  target_lang,
                  schema,
                  error=f"prompt_creation_failed: unsupported target_lang={target_lang}",
              )
              for item in batch_data
          ]

    # call the LLM
      try:
          raw_response, full_response_json = call_llm(
              shared_context,
              user_prompt,
              assistant_prefix,
              target_lang=target_lang,
              analysis_kind=analysis_kind,
          )
  
        # parse the result
          parsed_results = parse_markdown_table(raw_response, analysis_kind=analysis_kind)
          _log_parsed_result_quality(
              batch_data,
              parsed_results,
              target_lang,
              batch_num,
              analysis_kind,
          )
  
          logger.info(f"\nParsed Results ({len(parsed_results)} items):")
          logger.info(json.dumps(parsed_results, ensure_ascii=False, indent=2))
  
        # map back to original IDs
          results_with_ids = []
          for i, parsed_item in enumerate(parsed_results):
              if i < len(batch_data):
                  source_product = batch_data[i]
                  result = _normalize_analysis_result(
                      parsed_item,
                      product=source_product,
                      target_lang=target_lang,
                      schema=schema,
                  )
                  results_with_ids.append(result)
                  logger.info(
                      "Mapped: kind=%s seq=%s -> original_id=%s",
                      analysis_kind,
                      parsed_item.get("seq_no"),
                      source_product.get("id"),
                  )
  
        # save this batch's JSON log to a dedicated file
        batch_log = {
            "batch_num": batch_num,
            "analysis_kind": analysis_kind,
              "timestamp": datetime.now().isoformat(),
              "input_products": batch_data,
              "raw_response": raw_response,
              "full_response_json": full_response_json,
              "parsed_results": parsed_results,
              "final_results": results_with_ids,
          }
  
        # When batch json logs are written concurrently, keep file names unique to avoid overwrites
          batch_call_id = uuid.uuid4().hex[:12]
          batch_log_file = (
              LOG_DIR
              / f"batch_{analysis_kind}_{batch_num:04d}_{timestamp}_{batch_call_id}.json"
          )
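          # e.g. "batch_content_0003_20250101_093000_a1b2c3d4e5f6.json" (illustrative name)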
          with open(batch_log_file, "w", encoding="utf-8") as f:
              json.dump(batch_log, f, ensure_ascii=False, indent=2)
  
          logger.info(f"Batch log saved to: {batch_log_file}")
  
          return results_with_ids
  
      except Exception as e:
          logger.error(f"Error processing batch {batch_num}: {str(e)}", exc_info=True)
          # Return empty results, preserving the ID mapping
          return [
              _make_empty_analysis_result(item, target_lang, schema, error=str(e))
              for item in batch_data
          ]
  
  
  def analyze_products(
      products: List[Dict[str, str]],
      target_lang: str = "zh",
      batch_size: Optional[int] = None,
      tenant_id: Optional[str] = None,
      analysis_kind: str = "content",
  ) -> List[Dict[str, Any]]:
      """
      库调用入口:根据输入+语言,返回锚文本及各维度信息。
  
      Args:
          products: [{"id": "...", "title": "..."}]
          target_lang: 输出语言
          batch_size: 批大小,默认使用全局 BATCH_SIZE
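  
      Example (illustrative; the ID and title are made up):
          results = analyze_products(
              [{"id": "sku-1", "title": "Wireless bluetooth earbuds"}],
              target_lang="zh",
          )
          # each result includes "title_input" plus the normalized analysis fields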
      """
      if not API_KEY:
          raise RuntimeError("DASHSCOPE_API_KEY is not set, cannot call LLM")
  
      if not products:
          return []
  
      _get_analysis_schema(analysis_kind)
      results_by_index: List[Optional[Dict[str, Any]]] = [None] * len(products)
      uncached_items: List[Tuple[int, Dict[str, str]]] = []
  
      for idx, product in enumerate(products):
          title = str(product.get("title") or "").strip()
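          # An empty title skips the cache lookup; the product is still sent through
          # the LLM batch path so every input position gets a result.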
          if not title:
              uncached_items.append((idx, product))
              continue
  
          cached = _get_cached_analysis_result(product, target_lang, analysis_kind)
          if cached:
              logger.info(
                  f"[analyze_products] Cache hit for title='{title[:50]}...', "
                  f"kind={analysis_kind}, lang={target_lang}"
              )
              results_by_index[idx] = cached
              continue
  
          uncached_items.append((idx, product))
  
      if not uncached_items:
          return [item for item in results_by_index if item is not None]
  
      # call_llm processes at most BATCH_SIZE items per call (20 by default):
      # - batch up as many items as possible;
      # - even if the caller passes a larger batch_size, it is split at the cap automatically.
      req_bs = BATCH_SIZE if batch_size is None else int(batch_size)
      bs = max(1, min(req_bs, BATCH_SIZE))
      total_batches = (len(uncached_items) + bs - 1) // bs
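      # Ceil division, e.g. 45 uncached items with bs=20 -> (45 + 19) // 20 == 3 batches.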
  
      batch_jobs: List[Tuple[int, List[Tuple[int, Dict[str, str]]], List[Dict[str, str]]]] = []
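      # Each job is (batch_num, [(original_index, product), ...], [product, ...]); keeping
      # the original indices lets results be written back into results_by_index later.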
      for i in range(0, len(uncached_items), bs):
          batch_num = i // bs + 1
          batch_slice = uncached_items[i : i + bs]
          batch = [item for _, item in batch_slice]
          batch_jobs.append((batch_num, batch_slice, batch))
  
      # With a single batch (or max_workers <= 1), run serially: it avoids thread-pool
      # overhead and uncontrolled interleaving of logs and log files.
      if total_batches <= 1 or CONTENT_UNDERSTANDING_MAX_WORKERS <= 1:
          for batch_num, batch_slice, batch in batch_jobs:
              logger.info(
                  f"[analyze_products] Processing batch {batch_num}/{total_batches}, "
                  f"size={len(batch)}, kind={analysis_kind}, target_lang={target_lang}"
              )
              batch_results = process_batch(
                  batch,
                  batch_num=batch_num,
                  target_lang=target_lang,
                  analysis_kind=analysis_kind,
              )
  
              for (original_idx, product), item in zip(batch_slice, batch_results):
                  results_by_index[original_idx] = item
                  title_input = str(item.get("title_input") or "").strip()
                  if not title_input:
                      continue
                  if item.get("error"):
                      # Do not cache error results, to avoid amplifying transient failures
                      continue
                  try:
                      _set_cached_analysis_result(product, target_lang, item, analysis_kind)
                  except Exception:
                      # A warning has already been logged internally
                      pass
      else:
          max_workers = min(CONTENT_UNDERSTANDING_MAX_WORKERS, len(batch_jobs))
          logger.info(
              "[analyze_products] Using ThreadPoolExecutor for uncached batches: "
              "max_workers=%s, total_batches=%s, bs=%s, kind=%s, target_lang=%s",
              max_workers,
              total_batches,
              bs,
              analysis_kind,
              target_lang,
          )
  
          # Only the "LLM call + markdown parse" work runs in worker threads; Redis get/set
          # stays on the main thread to avoid extra risk from concurrent writes.
          # Note: the thread pool is a module-level singleton, so max_workers here mainly has
          # logging semantics (actual concurrency is capped by the singleton pool's limit).
          executor = _get_content_understanding_executor()
          future_by_batch_num: Dict[int, Any] = {}
          for batch_num, _batch_slice, batch in batch_jobs:
              future_by_batch_num[batch_num] = executor.submit(
                  process_batch,
                  batch,
                  batch_num=batch_num,
                  target_lang=target_lang,
                  analysis_kind=analysis_kind,
              )
  
          # Backfill in batch_num order to keep output stable (results_by_index maps by original input index)
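          # .result() blocks until the batch finishes; process_batch catches its own
          # exceptions and returns error-marked placeholders, so this normally does not raise.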
          for batch_num, batch_slice, _batch in batch_jobs:
              batch_results = future_by_batch_num[batch_num].result()
              for (original_idx, product), item in zip(batch_slice, batch_results):
                  results_by_index[original_idx] = item
                  title_input = str(item.get("title_input") or "").strip()
                  if not title_input:
                      continue
                  if item.get("error"):
                      # Do not cache error results, to avoid amplifying transient failures
                      continue
                  try:
                      _set_cached_analysis_result(product, target_lang, item, analysis_kind)
                  except Exception:
                      # A warning has already been logged internally
                      pass
  
      return [item for item in results_by_index if item is not None]
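  
  
  # Minimal manual smoke test (illustrative sketch, not part of the indexer pipeline):
  # assumes DASHSCOPE_API_KEY is exported and the config/Redis dependencies imported
  # by this module are reachable. Product IDs/titles below are made up.
  if __name__ == "__main__":
      _demo_results = analyze_products(
          [
              {"id": "demo-1", "title": "Stainless steel insulated water bottle 500ml"},
              {"id": "demo-2", "title": "Kids cotton cartoon-print t-shirt"},
          ],
          target_lang="zh",
          analysis_kind="content",
      )
      print(json.dumps(_demo_results, ensure_ascii=False, indent=2))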