#!/usr/bin/env python3
"""
Product content understanding and attribute enrichment module (product_enrich).

Provides LLM-based analysis of product anchor text, semantic attributes,
tags, etc., for in-memory use by the indexer and the API (this module is
no longer responsible for CSV reading/writing).
"""
import os
import json
import logging
import re
import time
import hashlib
from collections import OrderedDict
from datetime import datetime
from typing import List, Dict, Tuple, Any, Optional

import redis
import requests
from pathlib import Path

from config.env_config import REDIS_CONFIG
from config.tenant_config_loader import SOURCE_LANG_CODE_MAP
from indexer.product_enrich_prompts import (
    SYSTEM_MESSAGE,
    USER_INSTRUCTION_TEMPLATE,
    LANGUAGE_MARKDOWN_TABLE_HEADERS,
    SHARED_ANALYSIS_INSTRUCTION,
)

# Configuration
BATCH_SIZE = 20
# China North 2 (Beijing): https://dashscope.aliyuncs.com/compatible-mode/v1
# Singapore:               https://dashscope-intl.aliyuncs.com/compatible-mode/v1
# US (Virginia):           https://dashscope-us.aliyuncs.com/compatible-mode/v1
API_BASE_URL = "https://dashscope-us.aliyuncs.com/compatible-mode/v1"
MODEL_NAME = "qwen-flash"
API_KEY = os.environ.get("DASHSCOPE_API_KEY")
MAX_RETRIES = 3
RETRY_DELAY = 5  # seconds
REQUEST_TIMEOUT = 180  # seconds
LOGGED_SHARED_CONTEXT_CACHE_SIZE = 256
PROMPT_INPUT_MIN_ZH_CHARS = 20
PROMPT_INPUT_MAX_ZH_CHARS = 100
PROMPT_INPUT_MIN_WORDS = 16
PROMPT_INPUT_MAX_WORDS = 80

# Log paths
OUTPUT_DIR = Path("output_logs")
LOG_DIR = OUTPUT_DIR / "logs"

# Set up standalone logging (does not touch the global indexer.log)
LOG_DIR.mkdir(parents=True, exist_ok=True)
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
log_file = LOG_DIR / f"product_enrich_{timestamp}.log"
verbose_log_file = LOG_DIR / "product_enrich_verbose.log"

_logged_shared_context_keys: "OrderedDict[str, None]" = OrderedDict()

# Main logger: execution flow, batch progress, etc.
logger = logging.getLogger("product_enrich")
logger.setLevel(logging.INFO)
if not logger.handlers:
    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    file_handler = logging.FileHandler(log_file, encoding="utf-8")
    file_handler.setFormatter(formatter)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    # Do not propagate to the root logger, so nothing leaks into
    # logs/indexer.log or other files.
    logger.propagate = False

# Verbose logger: dedicated to full LLM requests and responses.
verbose_logger = logging.getLogger("product_enrich_verbose")
verbose_logger.setLevel(logging.INFO)
if not verbose_logger.handlers:
    verbose_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    verbose_file_handler = logging.FileHandler(verbose_log_file, encoding="utf-8")
    verbose_file_handler.setFormatter(verbose_formatter)
    verbose_logger.addHandler(verbose_file_handler)
    verbose_logger.propagate = False

logger.info("Verbose LLM logs are written to: %s", verbose_log_file)

# Redis cache (for anchors / semantic attributes)
ANCHOR_CACHE_PREFIX = REDIS_CONFIG.get("anchor_cache_prefix", "product_anchors")
ANCHOR_CACHE_EXPIRE_DAYS = int(REDIS_CONFIG.get("anchor_cache_expire_days", 30))
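
# For reference, REDIS_CONFIG is expected to expose roughly the keys read
# below (illustrative values only, inferred from the .get() calls; the real
# mapping comes from config.env_config and may differ per environment):
#
#     REDIS_CONFIG = {
#         "host": "localhost",
#         "port": 6479,
#         "password": None,
#         "socket_timeout": 1,
#         "socket_connect_timeout": 1,
#         "retry_on_timeout": False,
#         "anchor_cache_prefix": "product_anchors",
#         "anchor_cache_expire_days": 30,
#     }
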
_anchor_redis: Optional[redis.Redis] = None
try:
    _anchor_redis = redis.Redis(
        host=REDIS_CONFIG.get("host", "localhost"),
        port=REDIS_CONFIG.get("port", 6479),
        password=REDIS_CONFIG.get("password"),
        decode_responses=True,
        socket_timeout=REDIS_CONFIG.get("socket_timeout", 1),
        socket_connect_timeout=REDIS_CONFIG.get("socket_connect_timeout", 1),
        retry_on_timeout=REDIS_CONFIG.get("retry_on_timeout", False),
        health_check_interval=10,
    )
    _anchor_redis.ping()
    logger.info("Redis cache initialized for product anchors and semantic attributes")
except Exception as e:
    logger.warning(f"Failed to initialize Redis for anchors cache: {e}")
    _anchor_redis = None

_missing_prompt_langs = sorted(
    set(SOURCE_LANG_CODE_MAP) - set(LANGUAGE_MARKDOWN_TABLE_HEADERS)
)
if _missing_prompt_langs:
    raise RuntimeError(
        f"Missing product_enrich prompt config for languages: {_missing_prompt_langs}"
    )


def _normalize_space(text: str) -> str:
    return re.sub(r"\s+", " ", (text or "").strip())


def _contains_cjk(text: str) -> bool:
    return bool(re.search(r"[\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff]", text or ""))


def _truncate_by_chars(text: str, max_chars: int) -> str:
    return text[:max_chars].strip()


def _truncate_by_words(text: str, max_words: int) -> str:
    words = re.findall(r"\S+", text or "")
    return " ".join(words[:max_words]).strip()


def _detect_prompt_input_lang(text: str) -> str:
    # Simplification: any text containing CJK characters is treated as
    # Chinese-like; everything else is treated as a whitespace-tokenized
    # language.
    return "zh" if _contains_cjk(text) else "en"


def _build_prompt_input_text(product: Dict[str, Any]) -> str:
    """
    Build the product text that is actually sent into the prompt.

    Rules:
    - use title by default;
    - if the text is too short, append brief and then description;
    - if the text is too long, truncate coarsely according to language.
    """
    fields = [
        _normalize_space(str(product.get("title") or "")),
        _normalize_space(str(product.get("brief") or "")),
        _normalize_space(str(product.get("description") or "")),
    ]
    parts: List[str] = []

    def join_parts() -> str:
        return " | ".join(part for part in parts if part).strip()

    for field in fields:
        if not field:
            continue
        if field not in parts:
            parts.append(field)
        candidate = join_parts()
        if _detect_prompt_input_lang(candidate) == "zh":
            if len(candidate) >= PROMPT_INPUT_MIN_ZH_CHARS:
                return _truncate_by_chars(candidate, PROMPT_INPUT_MAX_ZH_CHARS)
        else:
            if len(re.findall(r"\S+", candidate)) >= PROMPT_INPUT_MIN_WORDS:
                return _truncate_by_words(candidate, PROMPT_INPUT_MAX_WORDS)

    candidate = join_parts()
    if not candidate:
        return ""
    if _detect_prompt_input_lang(candidate) == "zh":
        return _truncate_by_chars(candidate, PROMPT_INPUT_MAX_ZH_CHARS)
    return _truncate_by_words(candidate, PROMPT_INPUT_MAX_WORDS)
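
# A minimal illustration of the backfill rule above (hypothetical product;
# thresholds come from the PROMPT_INPUT_* constants):
#
#     >>> _build_prompt_input_text({"title": "Mug"})
#     'Mug'
#     >>> _build_prompt_input_text({"title": "Mug", "brief": "Ceramic coffee mug"})
#     'Mug | Ceramic coffee mug'
#
# A short title alone stays below PROMPT_INPUT_MIN_WORDS, so brief and
# description are appended with " | " until the minimum is met; text beyond
# PROMPT_INPUT_MAX_WORDS (or PROMPT_INPUT_MAX_ZH_CHARS for CJK text) is
# truncated.
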
def _make_anchor_cache_key(
    product: Dict[str, Any],
    target_lang: str,
) -> str:
    """Build a cache key determined solely by the effective prompt input text
    and the target language."""
    prompt_input = _build_prompt_input_text(product)
    h = hashlib.md5(prompt_input.encode("utf-8")).hexdigest()
    return f"{ANCHOR_CACHE_PREFIX}:{target_lang}:{prompt_input[:4]}{h}"


def _get_cached_anchor_result(
    product: Dict[str, Any],
    target_lang: str,
) -> Optional[Dict[str, Any]]:
    if not _anchor_redis:
        return None
    try:
        key = _make_anchor_cache_key(product, target_lang)
        raw = _anchor_redis.get(key)
        if not raw:
            return None
        return json.loads(raw)
    except Exception as e:
        logger.warning(f"Failed to get anchor cache: {e}")
        return None


def _set_cached_anchor_result(
    product: Dict[str, Any],
    target_lang: str,
    result: Dict[str, Any],
) -> None:
    if not _anchor_redis:
        return
    try:
        key = _make_anchor_cache_key(product, target_lang)
        ttl = ANCHOR_CACHE_EXPIRE_DAYS * 24 * 3600
        _anchor_redis.setex(key, ttl, json.dumps(result, ensure_ascii=False))
    except Exception as e:
        logger.warning(f"Failed to set anchor cache: {e}")


def _build_assistant_prefix(headers: List[str]) -> str:
    header_line = "| " + " | ".join(headers) + " |"
    separator_line = "|" + "----|" * len(headers)
    return f"{header_line}\n{separator_line}\n"


def _build_shared_context(products: List[Dict[str, str]]) -> str:
    shared_context = SHARED_ANALYSIS_INSTRUCTION
    for idx, product in enumerate(products, 1):
        prompt_input = _build_prompt_input_text(product)
        shared_context += f"{idx}. {prompt_input}\n"
    return shared_context


def _hash_text(text: str) -> str:
    return hashlib.md5((text or "").encode("utf-8")).hexdigest()[:12]


def _mark_shared_context_logged_once(shared_context_key: str) -> bool:
    if shared_context_key in _logged_shared_context_keys:
        _logged_shared_context_keys.move_to_end(shared_context_key)
        return False
    _logged_shared_context_keys[shared_context_key] = None
    if len(_logged_shared_context_keys) > LOGGED_SHARED_CONTEXT_CACHE_SIZE:
        _logged_shared_context_keys.popitem(last=False)
    return True


def reset_logged_shared_context_keys() -> None:
    """Test helper: clear the set of already-logged shared prompt keys."""
    _logged_shared_context_keys.clear()


def create_prompt(
    products: List[Dict[str, str]],
    target_lang: str = "zh",
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """Build the shared context, the localized output requirement, and the
    Partial Mode assistant prefix for the given target language."""
    markdown_table_headers = LANGUAGE_MARKDOWN_TABLE_HEADERS.get(target_lang)
    if not markdown_table_headers:
        logger.warning(
            "Unsupported target_lang for markdown table headers: %s",
            target_lang,
        )
        return None, None, None
    shared_context = _build_shared_context(products)
    language_label = SOURCE_LANG_CODE_MAP.get(target_lang, target_lang)
    user_prompt = USER_INSTRUCTION_TEMPLATE.format(language=language_label).strip()
    assistant_prefix = _build_assistant_prefix(markdown_table_headers)
    return shared_context, user_prompt, assistant_prefix


def _merge_partial_response(assistant_prefix: str, generated_content: str) -> str:
    """Merge the Partial Mode assistant prefix and the generated completion
    into one complete markdown table."""
    generated = (generated_content or "").lstrip()
    prefix_lines = [line.strip() for line in assistant_prefix.strip().splitlines()]
    generated_lines = generated.splitlines()
    if generated_lines:
        first_line = generated_lines[0].strip()
        if prefix_lines and first_line == prefix_lines[0]:
            generated_lines = generated_lines[1:]
            if (
                generated_lines
                and len(prefix_lines) > 1
                and generated_lines[0].strip() == prefix_lines[1]
            ):
                generated_lines = generated_lines[1:]
        elif len(prefix_lines) > 1 and first_line == prefix_lines[1]:
            generated_lines = generated_lines[1:]
    suffix = "\n".join(generated_lines).lstrip("\n")
    if suffix:
        return f"{assistant_prefix}{suffix}"
    return assistant_prefix
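
# A minimal illustration of the dedup behaviour (hypothetical two-column
# table; the real prefix is built from LANGUAGE_MARKDOWN_TABLE_HEADERS):
#
#     >>> prefix = "| No. | Title |\n|----|----|\n"
#     >>> _merge_partial_response(prefix, "| No. | Title |\n|----|----|\n| 1 | Mug |")
#     '| No. | Title |\n|----|----|\n| 1 | Mug |'
#
# Some models echo the forced prefix back in the completion; the helper strips
# the repeated header/separator lines so the merged markdown contains each
# exactly once.
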
def call_llm(
    shared_context: str,
    user_prompt: str,
    assistant_prefix: str,
    target_lang: str = "zh",
) -> Tuple[str, str]:
    """Call the LLM API (with retries), using Partial Mode to force the
    markdown table prefix."""
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    }
    shared_context_key = _hash_text(shared_context)
    localized_tail_key = _hash_text(f"{target_lang}\n{user_prompt}\n{assistant_prefix}")
    combined_user_prompt = f"{shared_context.rstrip()}\n\n{user_prompt.strip()}"
    payload = {
        "model": MODEL_NAME,
        "messages": [
            {
                "role": "system",
                "content": SYSTEM_MESSAGE,
            },
            {
                "role": "user",
                "content": combined_user_prompt,
            },
            {
                "role": "assistant",
                "content": assistant_prefix,
                "partial": True,
            },
        ],
        "temperature": 0.3,
        "top_p": 0.8,
    }
    request_data = {
        "headers": {k: v for k, v in headers.items() if k != "Authorization"},
        "payload": payload,
    }
    if _mark_shared_context_logged_once(shared_context_key):
        logger.info(f"\n{'=' * 80}")
        logger.info(
            "LLM Shared Context [model=%s, shared_key=%s, chars=%s] (logged once per process key)",
            MODEL_NAME,
            shared_context_key,
            len(shared_context),
        )
        logger.info("\nSystem Message:\n%s", SYSTEM_MESSAGE)
        logger.info("\nShared Context:\n%s", shared_context)
    verbose_logger.info(f"\n{'=' * 80}")
    verbose_logger.info(
        "LLM Request [model=%s, lang=%s, shared_key=%s, tail_key=%s]:",
        MODEL_NAME,
        target_lang,
        shared_context_key,
        localized_tail_key,
    )
    verbose_logger.info(json.dumps(request_data, ensure_ascii=False, indent=2))
    verbose_logger.info(f"\nCombined User Prompt:\n{combined_user_prompt}")
    verbose_logger.info(f"\nShared Context:\n{shared_context}")
    verbose_logger.info(f"\nLocalized Requirement:\n{user_prompt}")
    verbose_logger.info(f"\nAssistant Prefix:\n{assistant_prefix}")
    logger.info(
        "\nLLM Request Variant [lang=%s, shared_key=%s, tail_key=%s, prompt_chars=%s, prefix_chars=%s]",
        target_lang,
        shared_context_key,
        localized_tail_key,
        len(user_prompt),
        len(assistant_prefix),
    )
    logger.info("\nLocalized Requirement:\n%s", user_prompt)
    logger.info("\nAssistant Prefix:\n%s", assistant_prefix)

    # Create a session with proxies disabled.
    session = requests.Session()
    session.trust_env = False  # ignore system proxy settings

    try:
        # Retry loop
        for attempt in range(MAX_RETRIES):
            try:
                response = session.post(
                    f"{API_BASE_URL}/chat/completions",
                    headers=headers,
                    json=payload,
                    timeout=REQUEST_TIMEOUT,
                    proxies={"http": None, "https": None},  # explicitly disable proxies
                )
                response.raise_for_status()
                result = response.json()
                usage = result.get("usage") or {}
                verbose_logger.info(
                    "\nLLM Response [model=%s, lang=%s, shared_key=%s, tail_key=%s]:",
                    MODEL_NAME,
                    target_lang,
                    shared_context_key,
                    localized_tail_key,
                )
                verbose_logger.info(json.dumps(result, ensure_ascii=False, indent=2))
                generated_content = result["choices"][0]["message"]["content"]
                full_markdown = _merge_partial_response(assistant_prefix, generated_content)
                logger.info(
                    "\nLLM Response Summary [lang=%s, shared_key=%s, tail_key=%s, "
                    "generated_chars=%s, completion_tokens=%s, prompt_tokens=%s, total_tokens=%s]",
                    target_lang,
                    shared_context_key,
                    localized_tail_key,
                    len(generated_content or ""),
                    usage.get("completion_tokens"),
                    usage.get("prompt_tokens"),
                    usage.get("total_tokens"),
                )
                logger.info("\nGenerated Content:\n%s", generated_content)
                logger.info("\nMerged Markdown:\n%s", full_markdown)
                verbose_logger.info(f"\nGenerated Content:\n{generated_content}")
                verbose_logger.info(f"\nMerged Markdown:\n{full_markdown}")
                return full_markdown, json.dumps(result, ensure_ascii=False)
            except requests.exceptions.ProxyError as e:
                logger.warning(f"Attempt {attempt + 1}/{MAX_RETRIES}: Proxy error - {str(e)}")
                if attempt < MAX_RETRIES - 1:
                    logger.info(f"Retrying in {RETRY_DELAY} seconds...")
                    time.sleep(RETRY_DELAY)
                else:
                    raise
            except requests.exceptions.RequestException as e:
                logger.warning(f"Attempt {attempt + 1}/{MAX_RETRIES}: Request error - {str(e)}")
                if attempt < MAX_RETRIES - 1:
                    logger.info(f"Retrying in {RETRY_DELAY} seconds...")
                    time.sleep(RETRY_DELAY)
                else:
                    raise
            except Exception as e:
                logger.error(f"Unexpected error on attempt {attempt + 1}/{MAX_RETRIES}: {str(e)}")
                if attempt < MAX_RETRIES - 1:
                    logger.info(f"Retrying in {RETRY_DELAY} seconds...")
                    time.sleep(RETRY_DELAY)
                else:
                    raise
    finally:
        session.close()


def parse_markdown_table(markdown_content: str) -> List[Dict[str, str]]:
    """Parse the markdown table returned by the model."""
    lines = markdown_content.strip().split("\n")
    data = []
    data_started = False
    for line in lines:
        line = line.strip()
        if not line:
            continue
        # Table rows only.
        if line.startswith("|"):
            # Separator row ("----", ":---:", etc.; spaces allowed, e.g. "| ---- | ---- |").
            sep_chars = line.replace("|", "").strip().replace(" ", "")
            if sep_chars and set(sep_chars) <= {"-", ":"}:
                data_started = True
                continue
            # Header row(s): skipped regardless of language.
            if not data_started:
                # Wait for the separator before treating rows as data.
                continue
            # Data row.
            parts = [p.strip() for p in line.split("|")]
            parts = [p for p in parts if p]  # drop empty cells (an empty middle cell shifts later columns)
            if len(parts) >= 2:
                row = {
                    "seq_no": parts[0],
                    "title": parts[1],  # product title (in target language)
                    "category_path": parts[2] if len(parts) > 2 else "",  # category path
                    "tags": parts[3] if len(parts) > 3 else "",  # fine-grained tags
                    "target_audience": parts[4] if len(parts) > 4 else "",  # target audience
                    "usage_scene": parts[5] if len(parts) > 5 else "",  # usage scenario
                    "season": parts[6] if len(parts) > 6 else "",  # applicable season
                    "key_attributes": parts[7] if len(parts) > 7 else "",  # key attributes
                    "material": parts[8] if len(parts) > 8 else "",  # material
                    "features": parts[9] if len(parts) > 9 else "",  # features
                    "anchor_text": parts[10] if len(parts) > 10 else "",  # anchor text
                }
                data.append(row)
    return data
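
# A minimal illustration (hypothetical three-column table; real model output
# carries up to the 11 columns mapped above):
#
#     >>> md = "| # | Title | Category |\n|----|----|----|\n| 1 | Blue T-Shirt | Apparel > Tops |"
#     >>> rows = parse_markdown_table(md)
#     >>> rows[0]["seq_no"], rows[0]["title"], rows[0]["category_path"]
#     ('1', 'Blue T-Shirt', 'Apparel > Tops')
#
# The header row is skipped, the separator row flips data_started, and missing
# trailing columns default to "". Note that cells must not contain a literal
# "|", since rows are split on that character.
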
else "", # 细分标签 "target_audience": parts[4] if len(parts) > 4 else "", # 适用人群 "usage_scene": parts[5] if len(parts) > 5 else "", # 使用场景 "season": parts[6] if len(parts) > 6 else "", # 适用季节 "key_attributes": parts[7] if len(parts) > 7 else "", # 关键属性 "material": parts[8] if len(parts) > 8 else "", # 材质说明 "features": parts[9] if len(parts) > 9 else "", # 功能特点 "anchor_text": parts[10] if len(parts) > 10 else "", # 锚文本 } data.append(row) return data def _log_parsed_result_quality( batch_data: List[Dict[str, str]], parsed_results: List[Dict[str, str]], target_lang: str, batch_num: int, ) -> None: expected = len(batch_data) actual = len(parsed_results) if actual != expected: logger.warning( "Parsed row count mismatch for batch=%s lang=%s: expected=%s actual=%s", batch_num, target_lang, expected, actual, ) missing_anchor = sum(1 for item in parsed_results if not str(item.get("anchor_text") or "").strip()) missing_category = sum(1 for item in parsed_results if not str(item.get("category_path") or "").strip()) missing_title = sum(1 for item in parsed_results if not str(item.get("title") or "").strip()) logger.info( "Parsed Quality Summary [batch=%s, lang=%s]: rows=%s/%s, missing_title=%s, missing_category=%s, missing_anchor=%s", batch_num, target_lang, actual, expected, missing_title, missing_category, missing_anchor, ) def process_batch( batch_data: List[Dict[str, str]], batch_num: int, target_lang: str = "zh", ) -> List[Dict[str, str]]: """处理一个批次的数据""" logger.info(f"\n{'#' * 80}") logger.info(f"Processing Batch {batch_num} ({len(batch_data)} items)") # 创建提示词 shared_context, user_prompt, assistant_prefix = create_prompt( batch_data, target_lang=target_lang, ) # 如果提示词创建失败(例如不支持的 target_lang),本次批次整体失败,不再继续调用 LLM if shared_context is None or user_prompt is None or assistant_prefix is None: logger.error( "Failed to create prompt for batch %s, target_lang=%s; " "marking entire batch as failed without calling LLM", batch_num, target_lang, ) return [ { "id": item["id"], "lang": target_lang, "title_input": item.get("title", ""), "title": "", "category_path": "", "tags": "", "target_audience": "", "usage_scene": "", "season": "", "key_attributes": "", "material": "", "features": "", "anchor_text": "", "error": f"prompt_creation_failed: unsupported target_lang={target_lang}", } for item in batch_data ] # 调用LLM try: raw_response, full_response_json = call_llm( shared_context, user_prompt, assistant_prefix, target_lang=target_lang, ) # 解析结果 parsed_results = parse_markdown_table(raw_response) _log_parsed_result_quality(batch_data, parsed_results, target_lang, batch_num) logger.info(f"\nParsed Results ({len(parsed_results)} items):") logger.info(json.dumps(parsed_results, ensure_ascii=False, indent=2)) # 映射回原始ID results_with_ids = [] for i, parsed_item in enumerate(parsed_results): if i < len(batch_data): original_id = batch_data[i]["id"] result = { "id": original_id, "lang": target_lang, "title_input": batch_data[i]["title"], # 原始输入标题 "title": parsed_item.get("title", ""), # 模型生成的标题 "category_path": parsed_item.get("category_path", ""), # 品类路径 "tags": parsed_item.get("tags", ""), # 细分标签 "target_audience": parsed_item.get("target_audience", ""), # 适用人群 "usage_scene": parsed_item.get("usage_scene", ""), # 使用场景 "season": parsed_item.get("season", ""), # 适用季节 "key_attributes": parsed_item.get("key_attributes", ""), # 关键属性 "material": parsed_item.get("material", ""), # 材质说明 "features": parsed_item.get("features", ""), # 功能特点 "anchor_text": parsed_item.get("anchor_text", ""), # 锚文本 } results_with_ids.append(result) 
logger.info(f"Mapped: seq={parsed_item['seq_no']} -> original_id={original_id}") # 保存批次 JSON 日志到独立文件 batch_log = { "batch_num": batch_num, "timestamp": datetime.now().isoformat(), "input_products": batch_data, "raw_response": raw_response, "full_response_json": full_response_json, "parsed_results": parsed_results, "final_results": results_with_ids, } batch_log_file = LOG_DIR / f"batch_{batch_num:04d}_{timestamp}.json" with open(batch_log_file, "w", encoding="utf-8") as f: json.dump(batch_log, f, ensure_ascii=False, indent=2) logger.info(f"Batch log saved to: {batch_log_file}") return results_with_ids except Exception as e: logger.error(f"Error processing batch {batch_num}: {str(e)}", exc_info=True) # 返回空结果,保持ID映射 return [ { "id": item["id"], "lang": target_lang, "title_input": item["title"], "title": "", "category_path": "", "tags": "", "target_audience": "", "usage_scene": "", "season": "", "key_attributes": "", "material": "", "features": "", "anchor_text": "", "error": str(e), } for item in batch_data ] def analyze_products( products: List[Dict[str, str]], target_lang: str = "zh", batch_size: Optional[int] = None, tenant_id: Optional[str] = None, ) -> List[Dict[str, Any]]: """ 库调用入口:根据输入+语言,返回锚文本及各维度信息。 Args: products: [{"id": "...", "title": "..."}] target_lang: 输出语言 batch_size: 批大小,默认使用全局 BATCH_SIZE """ if not API_KEY: raise RuntimeError("DASHSCOPE_API_KEY is not set, cannot call LLM") if not products: return [] results_by_index: List[Optional[Dict[str, Any]]] = [None] * len(products) uncached_items: List[Tuple[int, Dict[str, str]]] = [] for idx, product in enumerate(products): title = str(product.get("title") or "").strip() if not title: uncached_items.append((idx, product)) continue cached = _get_cached_anchor_result(product, target_lang) if cached: logger.info( f"[analyze_products] Cache hit for title='{title[:50]}...', " f"lang={target_lang}" ) results_by_index[idx] = cached continue uncached_items.append((idx, product)) if not uncached_items: return [item for item in results_by_index if item is not None] # call_llm 一次处理上限固定为 BATCH_SIZE(默认 20): # - 尽可能攒批处理; # - 即便调用方传入更大的 batch_size,也会自动按上限拆批。 req_bs = BATCH_SIZE if batch_size is None else int(batch_size) bs = max(1, min(req_bs, BATCH_SIZE)) total_batches = (len(uncached_items) + bs - 1) // bs for i in range(0, len(uncached_items), bs): batch_num = i // bs + 1 batch_slice = uncached_items[i : i + bs] batch = [item for _, item in batch_slice] logger.info( f"[analyze_products] Processing batch {batch_num}/{total_batches}, " f"size={len(batch)}, target_lang={target_lang}" ) batch_results = process_batch(batch, batch_num=batch_num, target_lang=target_lang) for (original_idx, product), item in zip(batch_slice, batch_results): results_by_index[original_idx] = item title_input = str(item.get("title_input") or "").strip() if not title_input: continue if item.get("error"): # 不缓存错误结果,避免放大临时故障 continue try: _set_cached_anchor_result(product, target_lang, item) except Exception: # 已在内部记录 warning pass return [item for item in results_by_index if item is not None]