#!/usr/bin/env python3
"""
Product content understanding and attribute enrichment module (product_enrich).

Provides LLM-based analysis of product anchor texts, semantic attributes,
tags, etc., for in-memory use by the indexer and the API (this module no
longer handles CSV reading/writing).
"""

import os
import json
import logging
import re
import time
import hashlib
import uuid
import threading
from dataclasses import dataclass, field
from collections import OrderedDict
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import List, Dict, Tuple, Any, Optional, FrozenSet

import redis
import requests

from config.loader import get_app_config
from config.tenant_config_loader import SOURCE_LANG_CODE_MAP
from indexer.product_enrich_prompts import (
    SYSTEM_MESSAGE,
    USER_INSTRUCTION_TEMPLATE,
    LANGUAGE_MARKDOWN_TABLE_HEADERS,
    SHARED_ANALYSIS_INSTRUCTION,
    CATEGORY_TAXONOMY_PROFILES,
)

# Configuration
BATCH_SIZE = 20
# Upper bound on concurrent enrich-content LLM batch workers (thread pool;
# only uncached batches run concurrently).
_APP_CONFIG = get_app_config()
CONTENT_UNDERSTANDING_MAX_WORKERS = int(_APP_CONFIG.product_enrich.max_workers)

# China North 2 (Beijing): https://dashscope.aliyuncs.com/compatible-mode/v1
# Singapore:               https://dashscope-intl.aliyuncs.com/compatible-mode/v1
# US (Virginia):           https://dashscope-us.aliyuncs.com/compatible-mode/v1
API_BASE_URL = "https://dashscope-us.aliyuncs.com/compatible-mode/v1"
MODEL_NAME = "qwen-flash"
API_KEY = os.environ.get("DASHSCOPE_API_KEY")
MAX_RETRIES = 3
RETRY_DELAY = 5  # seconds
REQUEST_TIMEOUT = 180  # seconds
LOGGED_SHARED_CONTEXT_CACHE_SIZE = 256
PROMPT_INPUT_MIN_ZH_CHARS = 20
PROMPT_INPUT_MAX_ZH_CHARS = 100
PROMPT_INPUT_MIN_WORDS = 16
PROMPT_INPUT_MAX_WORDS = 80

# Log paths
OUTPUT_DIR = Path("output_logs")
LOG_DIR = OUTPUT_DIR / "logs"

# Set up standalone logging (does not affect the global indexer.log).
LOG_DIR.mkdir(parents=True, exist_ok=True)
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
log_file = LOG_DIR / f"product_enrich_{timestamp}.log"
verbose_log_file = LOG_DIR / "product_enrich_verbose.log"

_logged_shared_context_keys: "OrderedDict[str, None]" = OrderedDict()
_logged_shared_context_lock = threading.Lock()

_content_understanding_executor: Optional[ThreadPoolExecutor] = None
_content_understanding_executor_lock = threading.Lock()


def _get_content_understanding_executor() -> ThreadPoolExecutor:
    """
    Use a module-level singleton thread pool so that repeated requests within the
    same process do not stack up extra pools and blow past the intended
    concurrency limit.
    """
    global _content_understanding_executor
    with _content_understanding_executor_lock:
        if _content_understanding_executor is None:
            _content_understanding_executor = ThreadPoolExecutor(
                max_workers=CONTENT_UNDERSTANDING_MAX_WORKERS,
                thread_name_prefix="product-enrich-llm",
            )
        return _content_understanding_executor


# Main logger: execution flow, batch information, etc.
logger = logging.getLogger("product_enrich")
logger.setLevel(logging.INFO)
if not logger.handlers:
    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    file_handler = logging.FileHandler(log_file, encoding="utf-8")
    file_handler.setFormatter(formatter)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
# Do not propagate to the root logger, so nothing leaks into logs/indexer.log
# or other files.
logger.propagate = False

# Verbose logger: dedicated to LLM requests and responses.
verbose_logger = logging.getLogger("product_enrich_verbose")
verbose_logger.setLevel(logging.INFO)
if not verbose_logger.handlers:
    verbose_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    verbose_file_handler = logging.FileHandler(verbose_log_file, encoding="utf-8")
    verbose_file_handler.setFormatter(verbose_formatter)
    verbose_logger.addHandler(verbose_file_handler)
verbose_logger.propagate = False
logger.info("Verbose LLM logs are written to: %s", verbose_log_file)
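# For reference: a sketch of the app-config fields this module reads, inferred
# solely from the attribute accesses in this file (the authoritative schema is
# whatever config.loader.get_app_config() returns; this is not a definition):
#
#   product_enrich:
#     max_workers: <int>
#   infrastructure:
#     redis:
#       host / port / password
#       socket_timeout / socket_connect_timeout / retry_on_timeout
#       anchor_cache_prefix / anchor_cache_expire_days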
# Redis cache (for anchors / semantic attributes)
_REDIS_CONFIG = _APP_CONFIG.infrastructure.redis
ANCHOR_CACHE_PREFIX = _REDIS_CONFIG.anchor_cache_prefix
ANCHOR_CACHE_EXPIRE_DAYS = int(_REDIS_CONFIG.anchor_cache_expire_days)

_anchor_redis: Optional[redis.Redis] = None
try:
    _anchor_redis = redis.Redis(
        host=_REDIS_CONFIG.host,
        port=_REDIS_CONFIG.port,
        password=_REDIS_CONFIG.password,
        decode_responses=True,
        socket_timeout=_REDIS_CONFIG.socket_timeout,
        socket_connect_timeout=_REDIS_CONFIG.socket_connect_timeout,
        retry_on_timeout=_REDIS_CONFIG.retry_on_timeout,
        health_check_interval=10,
    )
    _anchor_redis.ping()
    logger.info("Redis cache initialized for product anchors and semantic attributes")
except Exception as e:
    logger.warning(f"Failed to initialize Redis for anchors cache: {e}")
    _anchor_redis = None

_missing_prompt_langs = sorted(set(SOURCE_LANG_CODE_MAP) - set(LANGUAGE_MARKDOWN_TABLE_HEADERS))
if _missing_prompt_langs:
    raise RuntimeError(
        f"Missing product_enrich prompt config for languages: {_missing_prompt_langs}"
    )

# Separators for multi-value fields.
_MULTI_VALUE_FIELD_SPLIT_RE = re.compile(r"[,、,;|/\n\t]+")

# Placeholders treated as "no content" in table cells.
_MARKDOWN_EMPTY_CELL_LITERALS: Tuple[str, ...] = ("-", "–", "—", "none", "null", "n/a", "无")
_MARKDOWN_EMPTY_CELL_TOKENS_CF: FrozenSet[str] = frozenset(
    lit.casefold() for lit in _MARKDOWN_EMPTY_CELL_LITERALS
)


def _normalize_markdown_table_cell(raw: Optional[str]) -> str:
    """Strip the cell and treat placeholder values as the empty string."""
    s = str(raw or "").strip()
    if not s:
        return ""
    if s.casefold() in _MARKDOWN_EMPTY_CELL_TOKENS_CF:
        return ""
    return s
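# Illustrative behaviour of the cell normalizer above (not an executed doctest):
#   _normalize_markdown_table_cell(" - ")       -> ""        (placeholder literal)
#   _normalize_markdown_table_cell("N/A")       -> ""        (case-insensitive match)
#   _normalize_markdown_table_cell(" Cotton ")  -> "Cotton"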
_CORE_INDEX_LANGUAGES = ("zh", "en")
_DEFAULT_ENRICHMENT_SCOPES = ("generic", "category_taxonomy")
_DEFAULT_CATEGORY_TAXONOMY_PROFILE = "apparel"

_CONTENT_ANALYSIS_ATTRIBUTE_FIELD_MAP = (
    ("tags", "enriched_tags"),
    ("target_audience", "target_audience"),
    ("usage_scene", "usage_scene"),
    ("season", "season"),
    ("key_attributes", "key_attributes"),
    ("material", "material"),
    ("features", "features"),
)

_CONTENT_ANALYSIS_RESULT_FIELDS = (
    "title",
    "category_path",
    "tags",
    "target_audience",
    "usage_scene",
    "season",
    "key_attributes",
    "material",
    "features",
    "anchor_text",
)

_CONTENT_ANALYSIS_MEANINGFUL_FIELDS = (
    "tags",
    "target_audience",
    "usage_scene",
    "season",
    "key_attributes",
    "material",
    "features",
    "anchor_text",
)

_CONTENT_ANALYSIS_FIELD_ALIASES = {
    "tags": ("tags", "enriched_tags"),
}

_CONTENT_ANALYSIS_QUALITY_FIELDS = ("title", "category_path", "anchor_text")


@dataclass(frozen=True)
class AnalysisSchema:
    name: str
    shared_instruction: str
    markdown_table_headers: Dict[str, List[str]]
    result_fields: Tuple[str, ...]
    meaningful_fields: Tuple[str, ...]
    cache_version: str = "v1"
    field_aliases: Dict[str, Tuple[str, ...]] = field(default_factory=dict)
    quality_fields: Tuple[str, ...] = ()

    def get_headers(self, target_lang: str) -> Optional[List[str]]:
        return self.markdown_table_headers.get(target_lang)


_ANALYSIS_SCHEMAS: Dict[str, AnalysisSchema] = {
    "content": AnalysisSchema(
        name="content",
        shared_instruction=SHARED_ANALYSIS_INSTRUCTION,
        markdown_table_headers=LANGUAGE_MARKDOWN_TABLE_HEADERS,
        result_fields=_CONTENT_ANALYSIS_RESULT_FIELDS,
        meaningful_fields=_CONTENT_ANALYSIS_MEANINGFUL_FIELDS,
        cache_version="v2",
        field_aliases=_CONTENT_ANALYSIS_FIELD_ALIASES,
        quality_fields=_CONTENT_ANALYSIS_QUALITY_FIELDS,
    ),
}


def _build_taxonomy_profile_schema(profile: str, config: Dict[str, Any]) -> AnalysisSchema:
    return AnalysisSchema(
        name=f"taxonomy:{profile}",
        shared_instruction=config["shared_instruction"],
        markdown_table_headers=config["markdown_table_headers"],
        result_fields=tuple(field["key"] for field in config["fields"]),
        meaningful_fields=tuple(field["key"] for field in config["fields"]),
        cache_version="v1",
    )


_CATEGORY_TAXONOMY_PROFILE_SCHEMAS: Dict[str, AnalysisSchema] = {
    profile: _build_taxonomy_profile_schema(profile, config)
    for profile, config in CATEGORY_TAXONOMY_PROFILES.items()
}

_CATEGORY_TAXONOMY_PROFILE_ATTRIBUTE_FIELD_MAPS: Dict[str, Tuple[Tuple[str, str], ...]] = {
    profile: tuple((field["key"], field["label"]) for field in config["fields"])
    for profile, config in CATEGORY_TAXONOMY_PROFILES.items()
}


def get_supported_category_taxonomy_profiles() -> Tuple[str, ...]:
    return tuple(_CATEGORY_TAXONOMY_PROFILE_SCHEMAS.keys())
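# For reference, _build_taxonomy_profile_schema and the maps above assume that each
# CATEGORY_TAXONOMY_PROFILES entry looks roughly like the sketch below. This is an
# illustration inferred from the keys accessed here; the authoritative definitions
# live in indexer/product_enrich_prompts.py:
#
#   "apparel": {
#       "shared_instruction": "...",
#       "markdown_table_headers": {"zh": [...], "en": [...]},
#       "fields": [
#           {"key": "...", "label": "..."},
#           ...
#       ],
#   }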
def _normalize_category_taxonomy_profile(category_taxonomy_profile: Optional[str] = None) -> str:
    profile = str(category_taxonomy_profile or _DEFAULT_CATEGORY_TAXONOMY_PROFILE).strip()
    if profile not in _CATEGORY_TAXONOMY_PROFILE_SCHEMAS:
        supported = ", ".join(get_supported_category_taxonomy_profiles())
        raise ValueError(
            f"Unsupported category_taxonomy_profile: {profile}. Supported profiles: {supported}"
        )
    return profile


def _get_analysis_schema(
    analysis_kind: str,
    *,
    category_taxonomy_profile: Optional[str] = None,
) -> AnalysisSchema:
    if analysis_kind == "content":
        return _ANALYSIS_SCHEMAS["content"]
    if analysis_kind == "taxonomy":
        profile = _normalize_category_taxonomy_profile(category_taxonomy_profile)
        return _CATEGORY_TAXONOMY_PROFILE_SCHEMAS[profile]
    raise ValueError(f"Unsupported analysis_kind: {analysis_kind}")


def _get_taxonomy_attribute_field_map(
    category_taxonomy_profile: Optional[str] = None,
) -> Tuple[Tuple[str, str], ...]:
    profile = _normalize_category_taxonomy_profile(category_taxonomy_profile)
    return _CATEGORY_TAXONOMY_PROFILE_ATTRIBUTE_FIELD_MAPS[profile]


def _normalize_enrichment_scopes(
    enrichment_scopes: Optional[List[str]] = None,
) -> Tuple[str, ...]:
    requested = _DEFAULT_ENRICHMENT_SCOPES if not enrichment_scopes else tuple(enrichment_scopes)
    normalized: List[str] = []
    seen = set()
    for enrichment_scope in requested:
        scope = str(enrichment_scope).strip()
        if scope not in {"generic", "category_taxonomy"}:
            raise ValueError(f"Unsupported enrichment_scope: {scope}")
        if scope in seen:
            continue
        seen.add(scope)
        normalized.append(scope)
    return tuple(normalized)


def split_multi_value_field(text: Optional[str]) -> List[str]:
    """Split a multi-value string from the LLM or business data into a list of phrases (stripped, empties dropped)."""
    if text is None:
        return []
    s = str(text).strip()
    if not s:
        return []
    return [p.strip() for p in _MULTI_VALUE_FIELD_SPLIT_RE.split(s) if p.strip()]
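# Illustrative behaviour of split_multi_value_field (not an executed doctest):
#   split_multi_value_field("纯棉、透气, breathable / summer")
#       -> ["纯棉", "透气", "breathable", "summer"]
#   split_multi_value_field("   ") -> []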
def _append_lang_phrase_map(target: Dict[str, List[str]], lang: str, raw_value: Any) -> None:
    parts = split_multi_value_field(raw_value)
    if not parts:
        return
    existing = target.get(lang) or []
    merged = list(dict.fromkeys([str(x).strip() for x in existing if str(x).strip()] + parts))
    if merged:
        target[lang] = merged


def _get_or_create_named_value_entry(
    target: List[Dict[str, Any]],
    name: str,
    *,
    default_value: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    for item in target:
        if item.get("name") == name:
            value = item.get("value")
            if isinstance(value, dict):
                return item
            break
    entry = {"name": name, "value": default_value or {}}
    target.append(entry)
    return entry


def _append_named_lang_phrase_map(
    target: List[Dict[str, Any]],
    name: str,
    lang: str,
    raw_value: Any,
) -> None:
    entry = _get_or_create_named_value_entry(target, name=name, default_value={})
    _append_lang_phrase_map(entry["value"], lang=lang, raw_value=raw_value)


def _get_product_id(product: Dict[str, Any]) -> str:
    return str(product.get("id") or product.get("spu_id") or "").strip()


def _get_analysis_field_aliases(field_name: str, schema: AnalysisSchema) -> Tuple[str, ...]:
    return schema.field_aliases.get(field_name, (field_name,))


def _get_analysis_field_value(row: Dict[str, Any], field_name: str, schema: AnalysisSchema) -> Any:
    for alias in _get_analysis_field_aliases(field_name, schema):
        if alias in row:
            return row.get(alias)
    return None


def _has_meaningful_value(value: Any) -> bool:
    if value is None:
        return False
    if isinstance(value, str):
        return bool(value.strip())
    if isinstance(value, dict):
        return any(_has_meaningful_value(v) for v in value.values())
    if isinstance(value, list):
        return any(_has_meaningful_value(v) for v in value)
    return bool(value)


def _make_empty_analysis_result(
    product: Dict[str, Any],
    target_lang: str,
    schema: AnalysisSchema,
    error: Optional[str] = None,
) -> Dict[str, Any]:
    result = {
        "id": _get_product_id(product),
        "lang": target_lang,
        "title_input": str(product.get("title") or "").strip(),
    }
    for field in schema.result_fields:
        result[field] = ""
    if error:
        result["error"] = error
    return result


def _normalize_analysis_result(
    result: Dict[str, Any],
    product: Dict[str, Any],
    target_lang: str,
    schema: AnalysisSchema,
) -> Dict[str, Any]:
    normalized = _make_empty_analysis_result(product, target_lang, schema)
    if not isinstance(result, dict):
        return normalized
    normalized["lang"] = str(result.get("lang") or target_lang).strip() or target_lang
    normalized["title_input"] = str(
        product.get("title") or result.get("title_input") or ""
    ).strip()
    for field in schema.result_fields:
        normalized[field] = str(_get_analysis_field_value(result, field, schema) or "").strip()
    if result.get("error"):
        normalized["error"] = str(result.get("error"))
    return normalized


def _has_meaningful_analysis_content(result: Dict[str, Any], schema: AnalysisSchema) -> bool:
    return any(_has_meaningful_value(result.get(field)) for field in schema.meaningful_fields)


def _append_analysis_attributes(
    target: List[Dict[str, Any]],
    row: Dict[str, Any],
    lang: str,
    schema: AnalysisSchema,
    field_map: Tuple[Tuple[str, str], ...],
) -> None:
    for source_name, output_name in field_map:
        raw = _get_analysis_field_value(row, source_name, schema)
        if not raw:
            continue
        _append_named_lang_phrase_map(
            target,
            name=output_name,
            lang=lang,
            raw_value=raw,
        )


def _apply_index_content_row(result: Dict[str, Any], row: Dict[str, Any], lang: str) -> None:
    if not row or row.get("error"):
        return
    content_schema = _get_analysis_schema("content")
    anchor_text = str(_get_analysis_field_value(row, "anchor_text", content_schema) or "").strip()
    if anchor_text:
        _append_lang_phrase_map(result["qanchors"], lang=lang, raw_value=anchor_text)
    for source_name, output_name in _CONTENT_ANALYSIS_ATTRIBUTE_FIELD_MAP:
        raw = _get_analysis_field_value(row, source_name, content_schema)
        if not raw:
            continue
        _append_named_lang_phrase_map(
            result["enriched_attributes"],
            name=output_name,
            lang=lang,
            raw_value=raw,
        )
        if output_name == "enriched_tags":
            _append_lang_phrase_map(result["enriched_tags"], lang=lang, raw_value=raw)


def _apply_index_taxonomy_row(
    result: Dict[str, Any],
    row: Dict[str, Any],
    lang: str,
    *,
    category_taxonomy_profile: Optional[str] = None,
) -> None:
    if not row or row.get("error"):
        return
    _append_analysis_attributes(
        result["enriched_taxonomy_attributes"],
        row=row,
        lang=lang,
        schema=_get_analysis_schema(
            "taxonomy",
            category_taxonomy_profile=category_taxonomy_profile,
        ),
        field_map=_get_taxonomy_attribute_field_map(category_taxonomy_profile),
    )


def _normalize_index_content_item(item: Dict[str, Any]) -> Dict[str, str]:
    item_id = _get_product_id(item)
    return {
        "id": item_id,
        "title": str(item.get("title") or "").strip(),
        "brief": str(item.get("brief") or "").strip(),
        "description": str(item.get("description") or "").strip(),
        "image_url": str(item.get("image_url") or "").strip(),
    }
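# A hedged usage sketch for the entry point defined below. The shape follows its
# docstring; the concrete ids, titles, and tenant id are illustrative only:
#
#   rows = build_index_content_fields(
#       items=[{"id": "SPU-1", "title": "纯棉短袖T恤", "brief": "透气夏季款"}],
#       tenant_id="demo",
#   )
#   # rows[0] is roughly:
#   # {
#   #     "id": "SPU-1",
#   #     "qanchors": {"zh": ["..."], "en": ["..."]},
#   #     "enriched_tags": {"zh": ["..."], "en": ["..."]},
#   #     "enriched_attributes": [{"name": "usage_scene", "value": {"zh": ["..."]}}, ...],
#   #     "enriched_taxonomy_attributes": [...],
#   # }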
def build_index_content_fields(
    items: List[Dict[str, Any]],
    tenant_id: Optional[str] = None,
    enrichment_scopes: Optional[List[str]] = None,
    category_taxonomy_profile: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """
    High-level entry point: produce content-understanding fields aligned with the ES mapping.

    Each input item must contain:
    - `id` or `spu_id`
    - `title`
    - optional `brief` / `description` / `image_url`
    - optional `enrichment_scopes`; by default both `generic` and `category_taxonomy` run
    - optional `category_taxonomy_profile`; defaults to `apparel`

    Each returned item contains:
    - `id`
    - `qanchors`
    - `enriched_tags`
    - `enriched_attributes`
    - `enriched_taxonomy_attributes`
    - optional `error`

    where:
    - `qanchors.{lang}` is an array of phrases
    - `enriched_tags.{lang}` is an array of tags
    """
    requested_enrichment_scopes = _normalize_enrichment_scopes(enrichment_scopes)
    normalized_taxonomy_profile = _normalize_category_taxonomy_profile(category_taxonomy_profile)
    normalized_items = [_normalize_index_content_item(item) for item in items]
    if not normalized_items:
        return []

    results_by_id: Dict[str, Dict[str, Any]] = {
        item["id"]: {
            "id": item["id"],
            "qanchors": {},
            "enriched_tags": {},
            "enriched_attributes": [],
            "enriched_taxonomy_attributes": [],
        }
        for item in normalized_items
    }

    for lang in _CORE_INDEX_LANGUAGES:
        if "generic" in requested_enrichment_scopes:
            try:
                rows = analyze_products(
                    products=normalized_items,
                    target_lang=lang,
                    batch_size=BATCH_SIZE,
                    tenant_id=tenant_id,
                    analysis_kind="content",
                    category_taxonomy_profile=normalized_taxonomy_profile,
                )
            except Exception as e:
                logger.warning("build_index_content_fields content enrichment failed for lang=%s: %s", lang, e)
                for item in normalized_items:
                    results_by_id[item["id"]].setdefault("error", str(e))
                continue

            for row in rows or []:
                item_id = str(row.get("id") or "").strip()
                if not item_id or item_id not in results_by_id:
                    continue
                if row.get("error"):
                    results_by_id[item_id].setdefault("error", row["error"])
                    continue
                _apply_index_content_row(results_by_id[item_id], row=row, lang=lang)

    if "category_taxonomy" in requested_enrichment_scopes:
        for lang in _CORE_INDEX_LANGUAGES:
            try:
                taxonomy_rows = analyze_products(
                    products=normalized_items,
                    target_lang=lang,
                    batch_size=BATCH_SIZE,
                    tenant_id=tenant_id,
                    analysis_kind="taxonomy",
                    category_taxonomy_profile=normalized_taxonomy_profile,
                )
            except Exception as e:
                logger.warning(
                    "build_index_content_fields taxonomy enrichment failed for profile=%s lang=%s: %s",
                    normalized_taxonomy_profile,
                    lang,
                    e,
                )
                for item in normalized_items:
                    results_by_id[item["id"]].setdefault("error", str(e))
                continue

            for row in taxonomy_rows or []:
                item_id = str(row.get("id") or "").strip()
                if not item_id or item_id not in results_by_id:
                    continue
                if row.get("error"):
                    results_by_id[item_id].setdefault("error", row["error"])
                    continue
                _apply_index_taxonomy_row(
                    results_by_id[item_id],
                    row=row,
                    lang=lang,
                    category_taxonomy_profile=normalized_taxonomy_profile,
                )

    return [results_by_id[item["id"]] for item in normalized_items]


def _normalize_space(text: str) -> str:
    return re.sub(r"\s+", " ", (text or "").strip())


def _contains_cjk(text: str) -> bool:
    return bool(re.search(r"[\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff]", text or ""))


def _truncate_by_chars(text: str, max_chars: int) -> str:
    return text[:max_chars].strip()


def _truncate_by_words(text: str, max_words: int) -> str:
    words = re.findall(r"\S+", text or "")
    return " ".join(words[:max_words]).strip()


def _detect_prompt_input_lang(text: str) -> str:
    # Simplification: any text containing CJK is treated as Chinese-like; everything
    # else is treated as a whitespace-tokenized language.
    return "zh" if _contains_cjk(text) else "en"
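# Illustrative behaviour of the prompt-input builder defined below (numbers restate
# the module constants above):
#   - a short Chinese title (under PROMPT_INPUT_MIN_ZH_CHARS = 20 characters) gets
#     `brief` and then `description` appended with " | " until the minimum is reached;
#   - once long enough, the text is truncated to PROMPT_INPUT_MAX_ZH_CHARS = 100
#     characters for Chinese-like text, or PROMPT_INPUT_MAX_WORDS = 80 words otherwise;
#   - if the minimum is never reached, whatever was gathered is still truncated and returned.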
def _build_prompt_input_text(product: Dict[str, Any]) -> str:
    """
    Build the product text that is actually sent to the prompt.

    Rules:
    - use `title` by default
    - if the text is too short, append `brief` and then `description`
    - if the text is too long, truncate it coarsely according to its language
    """
    fields = [
        _normalize_space(str(product.get("title") or "")),
        _normalize_space(str(product.get("brief") or "")),
        _normalize_space(str(product.get("description") or "")),
    ]
    parts: List[str] = []

    def join_parts() -> str:
        return " | ".join(part for part in parts if part).strip()

    for field in fields:
        if not field:
            continue
        if field not in parts:
            parts.append(field)
        candidate = join_parts()
        if _detect_prompt_input_lang(candidate) == "zh":
            if len(candidate) >= PROMPT_INPUT_MIN_ZH_CHARS:
                return _truncate_by_chars(candidate, PROMPT_INPUT_MAX_ZH_CHARS)
        else:
            if len(re.findall(r"\S+", candidate)) >= PROMPT_INPUT_MIN_WORDS:
                return _truncate_by_words(candidate, PROMPT_INPUT_MAX_WORDS)

    candidate = join_parts()
    if not candidate:
        return ""
    if _detect_prompt_input_lang(candidate) == "zh":
        return _truncate_by_chars(candidate, PROMPT_INPUT_MAX_ZH_CHARS)
    return _truncate_by_words(candidate, PROMPT_INPUT_MAX_WORDS)


def _make_analysis_cache_key(
    product: Dict[str, Any],
    target_lang: str,
    analysis_kind: str,
    category_taxonomy_profile: Optional[str] = None,
) -> str:
    """Build the cache key from the analysis kind, the actual prompt input text, and the
    target language, plus a hash of the prompt contract so prompt changes invalidate stale entries."""
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    prompt_input = _build_prompt_input_text(product)
    h = hashlib.md5(prompt_input.encode("utf-8")).hexdigest()
    prompt_contract = {
        "schema_name": schema.name,
        "cache_version": schema.cache_version,
        "system_message": SYSTEM_MESSAGE,
        "user_instruction_template": USER_INSTRUCTION_TEMPLATE,
        "shared_instruction": schema.shared_instruction,
        "assistant_headers": schema.get_headers(target_lang),
        "result_fields": schema.result_fields,
        "meaningful_fields": schema.meaningful_fields,
        "field_aliases": schema.field_aliases,
    }
    prompt_contract_hash = hashlib.md5(
        json.dumps(prompt_contract, ensure_ascii=False, sort_keys=True).encode("utf-8")
    ).hexdigest()[:12]
    return (
        f"{ANCHOR_CACHE_PREFIX}:{analysis_kind}:{prompt_contract_hash}:"
        f"{target_lang}:{prompt_input[:4]}{h}"
    )


def _make_anchor_cache_key(
    product: Dict[str, Any],
    target_lang: str,
) -> str:
    return _make_analysis_cache_key(product, target_lang, analysis_kind="content")


def _get_cached_analysis_result(
    product: Dict[str, Any],
    target_lang: str,
    analysis_kind: str,
    category_taxonomy_profile: Optional[str] = None,
) -> Optional[Dict[str, Any]]:
    if not _anchor_redis:
        return None
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    try:
        key = _make_analysis_cache_key(
            product,
            target_lang,
            analysis_kind,
            category_taxonomy_profile=category_taxonomy_profile,
        )
        raw = _anchor_redis.get(key)
        if not raw:
            return None
        result = _normalize_analysis_result(
            json.loads(raw),
            product=product,
            target_lang=target_lang,
            schema=schema,
        )
        if not _has_meaningful_analysis_content(result, schema):
            return None
        return result
    except Exception as e:
        logger.warning("Failed to get %s analysis cache: %s", analysis_kind, e)
        return None


def _get_cached_anchor_result(
    product: Dict[str, Any],
    target_lang: str,
) -> Optional[Dict[str, Any]]:
    return _get_cached_analysis_result(product, target_lang, analysis_kind="content")


def _set_cached_analysis_result(
    product: Dict[str, Any],
    target_lang: str,
    result: Dict[str, Any],
    analysis_kind: str,
    category_taxonomy_profile: Optional[str] = None,
) -> None:
    if not _anchor_redis:
        return
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    try:
        normalized = _normalize_analysis_result(
            result,
            product=product,
            target_lang=target_lang,
            schema=schema,
        )
        if not _has_meaningful_analysis_content(normalized, schema):
            return
        key = _make_analysis_cache_key(
            product,
            target_lang,
            analysis_kind,
            category_taxonomy_profile=category_taxonomy_profile,
        )
        ttl = ANCHOR_CACHE_EXPIRE_DAYS * 24 * 3600
        _anchor_redis.setex(key, ttl, json.dumps(normalized, ensure_ascii=False))
    except Exception as e:
        logger.warning("Failed to set %s analysis cache: %s", analysis_kind, e)


def _set_cached_anchor_result(
    product: Dict[str, Any],
    target_lang: str,
    result: Dict[str, Any],
) -> None:
    _set_cached_analysis_result(product, target_lang, result, analysis_kind="content")
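# For reference, the Redis keys produced above have the layout (placeholders, not
# real values):
#
#   {ANCHOR_CACHE_PREFIX}:{analysis_kind}:{12-char contract hash}:{lang}:{first 4 chars of prompt input}{md5 of prompt input}
#
# Entries are written with a TTL of ANCHOR_CACHE_EXPIRE_DAYS * 24 * 3600 seconds, and
# results without meaningful content are never cached.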
def _build_assistant_prefix(headers: List[str]) -> str:
    header_line = "| " + " | ".join(headers) + " |"
    separator_line = "|" + "----|" * len(headers)
    return f"{header_line}\n{separator_line}\n"


def _build_shared_context(products: List[Dict[str, str]], schema: AnalysisSchema) -> str:
    shared_context = schema.shared_instruction
    for idx, product in enumerate(products, 1):
        prompt_input = _build_prompt_input_text(product)
        shared_context += f"{idx}. {prompt_input}\n"
    return shared_context


def _hash_text(text: str) -> str:
    return hashlib.md5((text or "").encode("utf-8")).hexdigest()[:12]


def _mark_shared_context_logged_once(shared_context_key: str) -> bool:
    with _logged_shared_context_lock:
        if shared_context_key in _logged_shared_context_keys:
            _logged_shared_context_keys.move_to_end(shared_context_key)
            return False
        _logged_shared_context_keys[shared_context_key] = None
        if len(_logged_shared_context_keys) > LOGGED_SHARED_CONTEXT_CACHE_SIZE:
            _logged_shared_context_keys.popitem(last=False)
        return True


def reset_logged_shared_context_keys() -> None:
    """Test helper: clear the shared prompt keys that have already been logged."""
    with _logged_shared_context_lock:
        _logged_shared_context_keys.clear()


def create_prompt(
    products: List[Dict[str, str]],
    target_lang: str = "zh",
    analysis_kind: str = "content",
    category_taxonomy_profile: Optional[str] = None,
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """Create the shared context, the localized output requirement, and the Partial Mode assistant prefix for the target language."""
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    markdown_table_headers = schema.get_headers(target_lang)
    if not markdown_table_headers:
        logger.warning(
            "Unsupported target_lang for markdown table headers: kind=%s lang=%s",
            analysis_kind,
            target_lang,
        )
        return None, None, None

    shared_context = _build_shared_context(products, schema)
    language_label = SOURCE_LANG_CODE_MAP.get(target_lang, target_lang)
    user_prompt = USER_INSTRUCTION_TEMPLATE.format(language=language_label).strip()
    assistant_prefix = _build_assistant_prefix(markdown_table_headers)
    return shared_context, user_prompt, assistant_prefix


def _merge_partial_response(assistant_prefix: str, generated_content: str) -> str:
    """Join the Partial Mode assistant prefix with the generated completion into a full markdown table."""
    generated = (generated_content or "").lstrip()
    prefix_lines = [line.strip() for line in assistant_prefix.strip().splitlines()]
    generated_lines = generated.splitlines()
    if generated_lines:
        first_line = generated_lines[0].strip()
        if prefix_lines and first_line == prefix_lines[0]:
            generated_lines = generated_lines[1:]
            if generated_lines and len(prefix_lines) > 1 and generated_lines[0].strip() == prefix_lines[1]:
                generated_lines = generated_lines[1:]
        elif len(prefix_lines) > 1 and first_line == prefix_lines[1]:
            generated_lines = generated_lines[1:]
    suffix = "\n".join(generated_lines).lstrip("\n")
    if suffix:
        return f"{assistant_prefix}{suffix}"
    return assistant_prefix
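# A hedged illustration of the Partial Mode flow used below (the header names are
# placeholders; the real ones come from the prompt config):
#
#   assistant_prefix = "| seq | title | ... |\n|----|----|...\n"   # from _build_assistant_prefix
#   generated        = "| 1 | Cotton tee | ... |\n| 2 | ... |"     # model completion
#   _merge_partial_response(assistant_prefix, generated)
#       -> the prefix followed by the data rows; a header/separator row repeated at
#          the start of the completion is dropped before concatenation.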
def call_llm(
    shared_context: str,
    user_prompt: str,
    assistant_prefix: str,
    target_lang: str = "zh",
    analysis_kind: str = "content",
) -> Tuple[str, str]:
    """Call the LLM API (with retries), using Partial Mode to force the markdown table prefix."""
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    }
    shared_context_key = _hash_text(shared_context)
    localized_tail_key = _hash_text(f"{target_lang}\n{user_prompt}\n{assistant_prefix}")
    combined_user_prompt = f"{shared_context.rstrip()}\n\n{user_prompt.strip()}"

    payload = {
        "model": MODEL_NAME,
        "messages": [
            {
                "role": "system",
                "content": SYSTEM_MESSAGE,
            },
            {
                "role": "user",
                "content": combined_user_prompt,
            },
            {
                "role": "assistant",
                "content": assistant_prefix,
                "partial": True,
            },
        ],
        "temperature": 0.3,
        "top_p": 0.8,
    }

    request_data = {
        "headers": {k: v for k, v in headers.items() if k != "Authorization"},
        "payload": payload,
    }

    if _mark_shared_context_logged_once(shared_context_key):
        logger.info(f"\n{'=' * 80}")
        logger.info(
            "LLM Shared Context [model=%s, kind=%s, shared_key=%s, chars=%s] (logged once per process key)",
            MODEL_NAME,
            analysis_kind,
            shared_context_key,
            len(shared_context),
        )
        logger.info("\nSystem Message:\n%s", SYSTEM_MESSAGE)
        logger.info("\nShared Context:\n%s", shared_context)

    verbose_logger.info(f"\n{'=' * 80}")
    verbose_logger.info(
        "LLM Request [model=%s, kind=%s, lang=%s, shared_key=%s, tail_key=%s]:",
        MODEL_NAME,
        analysis_kind,
        target_lang,
        shared_context_key,
        localized_tail_key,
    )
    verbose_logger.info(json.dumps(request_data, ensure_ascii=False, indent=2))
    verbose_logger.info(f"\nCombined User Prompt:\n{combined_user_prompt}")
    verbose_logger.info(f"\nShared Context:\n{shared_context}")
    verbose_logger.info(f"\nLocalized Requirement:\n{user_prompt}")
    verbose_logger.info(f"\nAssistant Prefix:\n{assistant_prefix}")

    logger.info(
        "\nLLM Request Variant [kind=%s, lang=%s, shared_key=%s, tail_key=%s, prompt_chars=%s, prefix_chars=%s]",
        analysis_kind,
        target_lang,
        shared_context_key,
        localized_tail_key,
        len(user_prompt),
        len(assistant_prefix),
    )
    logger.info("\nLocalized Requirement:\n%s", user_prompt)
    logger.info("\nAssistant Prefix:\n%s", assistant_prefix)

    # Create a session with proxies disabled.
    session = requests.Session()
    session.trust_env = False  # Ignore system proxy settings.
    try:
        # Retry loop.
        for attempt in range(MAX_RETRIES):
            try:
                response = session.post(
                    f"{API_BASE_URL}/chat/completions",
                    headers=headers,
                    json=payload,
                    timeout=REQUEST_TIMEOUT,
                    proxies={"http": None, "https": None},  # Explicitly disable proxies.
                )
                response.raise_for_status()
                result = response.json()
                usage = result.get("usage") or {}

                verbose_logger.info(
                    "\nLLM Response [model=%s, kind=%s, lang=%s, shared_key=%s, tail_key=%s]:",
                    MODEL_NAME,
                    analysis_kind,
                    target_lang,
                    shared_context_key,
                    localized_tail_key,
                )
                verbose_logger.info(json.dumps(result, ensure_ascii=False, indent=2))

                generated_content = result["choices"][0]["message"]["content"]
                full_markdown = _merge_partial_response(assistant_prefix, generated_content)

                logger.info(
                    "\nLLM Response Summary [kind=%s, lang=%s, shared_key=%s, tail_key=%s, generated_chars=%s, completion_tokens=%s, prompt_tokens=%s, total_tokens=%s]",
                    analysis_kind,
                    target_lang,
                    shared_context_key,
                    localized_tail_key,
                    len(generated_content or ""),
                    usage.get("completion_tokens"),
                    usage.get("prompt_tokens"),
                    usage.get("total_tokens"),
                )
                logger.info("\nGenerated Content:\n%s", generated_content)
                logger.info("\nMerged Markdown:\n%s", full_markdown)
                verbose_logger.info(f"\nGenerated Content:\n{generated_content}")
                verbose_logger.info(f"\nMerged Markdown:\n{full_markdown}")

                return full_markdown, json.dumps(result, ensure_ascii=False)
            except requests.exceptions.ProxyError as e:
                logger.warning(f"Attempt {attempt + 1}/{MAX_RETRIES}: Proxy error - {str(e)}")
                if attempt < MAX_RETRIES - 1:
                    logger.info(f"Retrying in {RETRY_DELAY} seconds...")
                    time.sleep(RETRY_DELAY)
                else:
                    raise
            except requests.exceptions.RequestException as e:
                logger.warning(f"Attempt {attempt + 1}/{MAX_RETRIES}: Request error - {str(e)}")
                if attempt < MAX_RETRIES - 1:
                    logger.info(f"Retrying in {RETRY_DELAY} seconds...")
                    time.sleep(RETRY_DELAY)
                else:
                    raise
            except Exception as e:
                logger.error(f"Unexpected error on attempt {attempt + 1}/{MAX_RETRIES}: {str(e)}")
                if attempt < MAX_RETRIES - 1:
                    logger.info(f"Retrying in {RETRY_DELAY} seconds...")
                    time.sleep(RETRY_DELAY)
                else:
                    raise
    finally:
        session.close()
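# The parser below expects markdown of roughly this shape (column names are an
# illustration; the real headers depend on the analysis schema):
#
#   | seq | title | ... | anchor_text |
#   |----|----|----|----|
#   | 1 | Cotton tee | ... | breathable summer tee |
#   | 2 | ... | ... | ... |
#
# The first cell of each data row becomes seq_no; the remaining cells are mapped in
# order onto schema.result_fields, and rows with fewer than two cells are skipped.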
def parse_markdown_table(
    markdown_content: str,
    analysis_kind: str = "content",
    category_taxonomy_profile: Optional[str] = None,
) -> List[Dict[str, str]]:
    """Parse the markdown table content returned by the LLM."""
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    lines = markdown_content.strip().split("\n")
    data = []
    data_started = False

    for line in lines:
        line = line.strip()
        if not line:
            continue
        # Only table rows are processed.
        if line.startswith("|"):
            # Separator row ("----", ":---:", etc.; spaces allowed, e.g. "| ---- | ---- |").
            sep_chars = line.replace("|", "").strip().replace(" ", "")
            if sep_chars and set(sep_chars) <= {"-", ":"}:
                data_started = True
                continue
            # Leading header row: skipped regardless of language.
            if not data_started:
                # Wait for the next data row.
                continue
            # Parse a data row.
            parts = [p.strip() for p in line.split("|")]
            if parts and parts[0] == "":
                parts = parts[1:]
            if parts and parts[-1] == "":
                parts = parts[:-1]
            if len(parts) >= 2:
                row = {"seq_no": parts[0]}
                for field_index, field_name in enumerate(schema.result_fields, start=1):
                    cell = parts[field_index] if len(parts) > field_index else ""
                    row[field_name] = _normalize_markdown_table_cell(cell)
                data.append(row)
    return data


def _log_parsed_result_quality(
    batch_data: List[Dict[str, str]],
    parsed_results: List[Dict[str, str]],
    target_lang: str,
    batch_num: int,
    analysis_kind: str,
    category_taxonomy_profile: Optional[str] = None,
) -> None:
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    expected = len(batch_data)
    actual = len(parsed_results)
    if actual != expected:
        logger.warning(
            "Parsed row count mismatch for kind=%s batch=%s lang=%s: expected=%s actual=%s",
            analysis_kind,
            batch_num,
            target_lang,
            expected,
            actual,
        )
    if not schema.quality_fields:
        logger.info(
            "Parsed Quality Summary [kind=%s, batch=%s, lang=%s]: rows=%s/%s",
            analysis_kind,
            batch_num,
            target_lang,
            actual,
            expected,
        )
        return
    missing_summary = ", ".join(
        f"missing_{field}="
        f"{sum(1 for item in parsed_results if not str(item.get(field) or '').strip())}"
        for field in schema.quality_fields
    )
    logger.info(
        "Parsed Quality Summary [kind=%s, batch=%s, lang=%s]: rows=%s/%s, %s",
        analysis_kind,
        batch_num,
        target_lang,
        actual,
        expected,
        missing_summary,
    )
def process_batch(
    batch_data: List[Dict[str, str]],
    batch_num: int,
    target_lang: str = "zh",
    analysis_kind: str = "content",
    category_taxonomy_profile: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Process one batch of products."""
    schema = _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    logger.info(f"\n{'#' * 80}")
    logger.info(
        "Processing Batch %s (%s items, kind=%s)",
        batch_num,
        len(batch_data),
        analysis_kind,
    )

    # Build the prompt.
    shared_context, user_prompt, assistant_prefix = create_prompt(
        batch_data,
        target_lang=target_lang,
        analysis_kind=analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )
    # If prompt creation failed (e.g. an unsupported target_lang), fail the whole
    # batch without calling the LLM.
    if shared_context is None or user_prompt is None or assistant_prefix is None:
        logger.error(
            "Failed to create prompt for batch %s, kind=%s, target_lang=%s; "
            "marking entire batch as failed without calling LLM",
            batch_num,
            analysis_kind,
            target_lang,
        )
        return [
            _make_empty_analysis_result(
                item,
                target_lang,
                schema,
                error=f"prompt_creation_failed: unsupported target_lang={target_lang}",
            )
            for item in batch_data
        ]

    # Call the LLM.
    try:
        raw_response, full_response_json = call_llm(
            shared_context,
            user_prompt,
            assistant_prefix,
            target_lang=target_lang,
            analysis_kind=analysis_kind,
        )

        # Parse the result.
        parsed_results = parse_markdown_table(
            raw_response,
            analysis_kind=analysis_kind,
            category_taxonomy_profile=category_taxonomy_profile,
        )
        _log_parsed_result_quality(
            batch_data,
            parsed_results,
            target_lang,
            batch_num,
            analysis_kind,
            category_taxonomy_profile,
        )
        logger.info(f"\nParsed Results ({len(parsed_results)} items):")
        logger.info(json.dumps(parsed_results, ensure_ascii=False, indent=2))

        # Map rows back onto the original product IDs.
        results_with_ids = []
        for i, parsed_item in enumerate(parsed_results):
            if i < len(batch_data):
                source_product = batch_data[i]
                result = _normalize_analysis_result(
                    parsed_item,
                    product=source_product,
                    target_lang=target_lang,
                    schema=schema,
                )
                results_with_ids.append(result)
                logger.info(
                    "Mapped: kind=%s seq=%s -> original_id=%s",
                    analysis_kind,
                    parsed_item.get("seq_no"),
                    source_product.get("id"),
                )

        # Save the batch JSON log to its own file.
        batch_log = {
            "batch_num": batch_num,
            "analysis_kind": analysis_kind,
            "timestamp": datetime.now().isoformat(),
            "input_products": batch_data,
            "raw_response": raw_response,
            "full_response_json": full_response_json,
            "parsed_results": parsed_results,
            "final_results": results_with_ids,
        }
        # When batch logs are written concurrently, keep file names unique to avoid overwrites.
        batch_call_id = uuid.uuid4().hex[:12]
        batch_log_file = (
            LOG_DIR / f"batch_{analysis_kind}_{batch_num:04d}_{timestamp}_{batch_call_id}.json"
        )
        with open(batch_log_file, "w", encoding="utf-8") as f:
            json.dump(batch_log, f, ensure_ascii=False, indent=2)
        logger.info(f"Batch log saved to: {batch_log_file}")

        return results_with_ids

    except Exception as e:
        logger.error(f"Error processing batch {batch_num}: {str(e)}", exc_info=True)
        # Return empty results so the ID mapping is preserved.
        return [
            _make_empty_analysis_result(item, target_lang, schema, error=str(e))
            for item in batch_data
        ]


def analyze_products(
    products: List[Dict[str, str]],
    target_lang: str = "zh",
    batch_size: Optional[int] = None,
    tenant_id: Optional[str] = None,
    analysis_kind: str = "content",
    category_taxonomy_profile: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """
    Library entry point: given the inputs and a language, return anchor texts and the other analysis dimensions.

    Args:
        products: [{"id": "...", "title": "..."}]
        target_lang: output language
        batch_size: batch size; defaults to the global BATCH_SIZE
    """
    if not API_KEY:
        raise RuntimeError("DASHSCOPE_API_KEY is not set, cannot call LLM")
    if not products:
        return []

    _get_analysis_schema(
        analysis_kind,
        category_taxonomy_profile=category_taxonomy_profile,
    )

    results_by_index: List[Optional[Dict[str, Any]]] = [None] * len(products)
    uncached_items: List[Tuple[int, Dict[str, str]]] = []

    for idx, product in enumerate(products):
        title = str(product.get("title") or "").strip()
        if not title:
            uncached_items.append((idx, product))
            continue
        cached = _get_cached_analysis_result(
            product,
            target_lang,
            analysis_kind,
            category_taxonomy_profile=category_taxonomy_profile,
        )
        if cached:
            logger.info(
                f"[analyze_products] Cache hit for title='{title[:50]}...', "
                f"kind={analysis_kind}, lang={target_lang}"
            )
            results_by_index[idx] = cached
            continue
        uncached_items.append((idx, product))

    if not uncached_items:
        return [item for item in results_by_index if item is not None]

    # call_llm handles at most BATCH_SIZE items per call (default 20):
    # - batch items together as much as possible;
    # - even if the caller passes a larger batch_size, it is clamped to this limit.
    req_bs = BATCH_SIZE if batch_size is None else int(batch_size)
    bs = max(1, min(req_bs, BATCH_SIZE))
    total_batches = (len(uncached_items) + bs - 1) // bs

    batch_jobs: List[Tuple[int, List[Tuple[int, Dict[str, str]]], List[Dict[str, str]]]] = []
    for i in range(0, len(uncached_items), bs):
        batch_num = i // bs + 1
        batch_slice = uncached_items[i : i + bs]
        batch = [item for _, item in batch_slice]
        batch_jobs.append((batch_num, batch_slice, batch))
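    # Worked example of the split above (illustrative numbers): with 45 uncached items
    # and bs = 20, total_batches = (45 + 20 - 1) // 20 = 3, giving batches of 20, 20, and 5.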
    # With a single batch (or a single worker) run serially: this avoids thread-pool
    # overhead and keeps the interleaving of logs and batch log files predictable.
    if total_batches <= 1 or CONTENT_UNDERSTANDING_MAX_WORKERS <= 1:
        for batch_num, batch_slice, batch in batch_jobs:
            logger.info(
                f"[analyze_products] Processing batch {batch_num}/{total_batches}, "
                f"size={len(batch)}, kind={analysis_kind}, target_lang={target_lang}"
            )
            batch_results = process_batch(
                batch,
                batch_num=batch_num,
                target_lang=target_lang,
                analysis_kind=analysis_kind,
                category_taxonomy_profile=category_taxonomy_profile,
            )
            for (original_idx, product), item in zip(batch_slice, batch_results):
                results_by_index[original_idx] = item
                title_input = str(item.get("title_input") or "").strip()
                if not title_input:
                    continue
                if item.get("error"):
                    # Do not cache error results, so transient failures are not amplified.
                    continue
                try:
                    _set_cached_analysis_result(
                        product,
                        target_lang,
                        item,
                        analysis_kind,
                        category_taxonomy_profile=category_taxonomy_profile,
                    )
                except Exception:
                    # Already logged as a warning inside the setter.
                    pass
    else:
        max_workers = min(CONTENT_UNDERSTANDING_MAX_WORKERS, len(batch_jobs))
        logger.info(
            "[analyze_products] Using ThreadPoolExecutor for uncached batches: "
            "max_workers=%s, total_batches=%s, bs=%s, kind=%s, target_lang=%s",
            max_workers,
            total_batches,
            bs,
            analysis_kind,
            target_lang,
        )
        # Only the LLM call + markdown parsing run inside threads; Redis get/set stays on
        # the main thread to avoid any extra risk from concurrent writes.
        # Note: the thread pool is a module-level singleton, so max_workers here is mainly
        # informational (actual concurrency is bounded by the singleton pool's limit).
        executor = _get_content_understanding_executor()
        future_by_batch_num: Dict[int, Any] = {}
        for batch_num, _batch_slice, batch in batch_jobs:
            future_by_batch_num[batch_num] = executor.submit(
                process_batch,
                batch,
                batch_num=batch_num,
                target_lang=target_lang,
                analysis_kind=analysis_kind,
                category_taxonomy_profile=category_taxonomy_profile,
            )
        # Fill results back in batch_num order so the output is stable
        # (results_by_index is keyed by the original input index).
        for batch_num, batch_slice, _batch in batch_jobs:
            batch_results = future_by_batch_num[batch_num].result()
            for (original_idx, product), item in zip(batch_slice, batch_results):
                results_by_index[original_idx] = item
                title_input = str(item.get("title_input") or "").strip()
                if not title_input:
                    continue
                if item.get("error"):
                    # Do not cache error results, so transient failures are not amplified.
                    continue
                try:
                    _set_cached_analysis_result(
                        product,
                        target_lang,
                        item,
                        analysis_kind,
                        category_taxonomy_profile=category_taxonomy_profile,
                    )
                except Exception:
                    # Already logged as a warning inside the setter.
                    pass

    return [item for item in results_by_index if item is not None]
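
# A hedged smoke-test sketch showing how the public entry points are typically
# invoked (the product data and tenant id below are hypothetical; running it
# requires DASHSCOPE_API_KEY and a readable app config, while Redis is optional
# because the module degrades to uncached calls).
if __name__ == "__main__":
    demo_items = [
        {"id": "demo-1", "title": "纯棉短袖T恤", "brief": "夏季透气基础款"},
    ]
    demo_rows = build_index_content_fields(items=demo_items, tenant_id="demo")
    print(json.dumps(demo_rows, ensure_ascii=False, indent=2))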