#!/usr/bin/env python3 """ 商品内容理解与属性补充模块(product_enrich) 提供基于 LLM 的商品锚文本 / 语义属性 / 标签等分析能力, 供 indexer 与 API 在内存中调用(不再负责 CSV 读写)。 """ import os import json import logging import time import hashlib from datetime import datetime from typing import List, Dict, Tuple, Any, Optional import redis import requests from pathlib import Path from config.env_config import REDIS_CONFIG from config.tenant_config_loader import SOURCE_LANG_CODE_MAP # 配置 BATCH_SIZE = 20 # 华北2(北京):https://dashscope.aliyuncs.com/compatible-mode/v1 # 新加坡:https://dashscope-intl.aliyuncs.com/compatible-mode/v1 # 美国(弗吉尼亚):https://dashscope-us.aliyuncs.com/compatible-mode/v1 API_BASE_URL = "https://dashscope-us.aliyuncs.com/compatible-mode/v1" MODEL_NAME = "qwen-flash" API_KEY = os.environ.get("DASHSCOPE_API_KEY") MAX_RETRIES = 3 RETRY_DELAY = 5 # 秒 REQUEST_TIMEOUT = 180 # 秒 # 日志路径 OUTPUT_DIR = Path("output_logs") LOG_DIR = OUTPUT_DIR / "logs" # 设置独立日志(不影响全局 indexer.log) LOG_DIR.mkdir(parents=True, exist_ok=True) timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") log_file = LOG_DIR / f"product_enrich_{timestamp}.log" verbose_log_file = LOG_DIR / "product_enrich_verbose.log" # 主日志 logger:执行流程、批次信息等 logger = logging.getLogger("product_enrich") logger.setLevel(logging.INFO) if not logger.handlers: formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") file_handler = logging.FileHandler(log_file, encoding="utf-8") file_handler.setFormatter(formatter) stream_handler = logging.StreamHandler() stream_handler.setFormatter(formatter) logger.addHandler(file_handler) logger.addHandler(stream_handler) # 避免日志向根 logger 传播,防止写入 logs/indexer.log 等其他文件 logger.propagate = False # 详尽日志 logger:专门记录 LLM 请求与响应 verbose_logger = logging.getLogger("product_enrich_verbose") verbose_logger.setLevel(logging.INFO) if not verbose_logger.handlers: verbose_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") verbose_file_handler = logging.FileHandler(verbose_log_file, encoding="utf-8") verbose_file_handler.setFormatter(verbose_formatter) verbose_logger.addHandler(verbose_file_handler) verbose_logger.propagate = False # Redis 缓存(用于 anchors / 语义属性) ANCHOR_CACHE_PREFIX = REDIS_CONFIG.get("anchor_cache_prefix", "product_anchors") ANCHOR_CACHE_EXPIRE_DAYS = int(REDIS_CONFIG.get("anchor_cache_expire_days", 30)) _anchor_redis: Optional[redis.Redis] = None try: _anchor_redis = redis.Redis( host=REDIS_CONFIG.get("host", "localhost"), port=REDIS_CONFIG.get("port", 6479), password=REDIS_CONFIG.get("password"), decode_responses=True, socket_timeout=REDIS_CONFIG.get("socket_timeout", 1), socket_connect_timeout=REDIS_CONFIG.get("socket_connect_timeout", 1), retry_on_timeout=REDIS_CONFIG.get("retry_on_timeout", False), health_check_interval=10, ) _anchor_redis.ping() logger.info("Redis cache initialized for product anchors and semantic attributes") except Exception as e: logger.warning(f"Failed to initialize Redis for anchors cache: {e}") _anchor_redis = None # 中文版本提示词(请勿删除): # "你是一名电商平台的商品标注员,你的工作是对输入的每个商品进行理解、分析和标注," # "并按要求格式返回 Markdown 表格。所有输出内容必须为中文。" SYSTEM_MESSAGES = ( "You are a product annotator for an e-commerce platform. " "For each input product, you must understand, analyze and label it, " "and return a Markdown table strictly following the requested format. " "All output must be in English." ) def _make_anchor_cache_key( title: str, target_lang: str, tenant_id: Optional[str] = None, ) -> str: """构造 anchors/语义属性的缓存 key。""" base = (tenant_id or "global").strip() h = hashlib.md5(title.encode("utf-8")).hexdigest() return f"{ANCHOR_CACHE_PREFIX}:{base}:{target_lang}:{h}" def _get_cached_anchor_result( title: str, target_lang: str, tenant_id: Optional[str] = None, ) -> Optional[Dict[str, Any]]: if not _anchor_redis: return None try: key = _make_anchor_cache_key(title, target_lang, tenant_id) raw = _anchor_redis.get(key) if not raw: return None return json.loads(raw) except Exception as e: logger.warning(f"Failed to get anchor cache: {e}") return None def _set_cached_anchor_result( title: str, target_lang: str, result: Dict[str, Any], tenant_id: Optional[str] = None, ) -> None: if not _anchor_redis: return try: key = _make_anchor_cache_key(title, target_lang, tenant_id) ttl = ANCHOR_CACHE_EXPIRE_DAYS * 24 * 3600 _anchor_redis.setex(key, ttl, json.dumps(result, ensure_ascii=False)) except Exception as e: logger.warning(f"Failed to set anchor cache: {e}") def create_prompt(products: List[Dict[str, str]], target_lang: str = "zh") -> str: """根据目标语言创建 LLM 提示词和表头说明。 约定: - 提示词始终使用英文; - 当 target_lang == "en" 时,直接要求用英文分析并输出英文表头; - 当 target_lang 为其他语言时,视作“多轮对话”的后续轮次: * 默认上一轮已经用英文完成了分析; * 当前轮只需要在保持结构和含义不变的前提下,将整张表格翻译为目标语言, 包含表头与所有单元格内容。 """ lang_name = SOURCE_LANG_CODE_MAP.get(target_lang, target_lang) # 中文版本提示词(请勿删除) # prompt = """请对输入的每条商品标题,分析并提取以下信息: # 1. 商品标题:将输入商品名称翻译为自然、完整的中文商品标题 # 2. 品类路径:从大类到细分品类,用">"分隔(例如:服装>女装>裤子>工装裤) # 3. 细分标签:商品的风格、特点、功能等(例如:碎花,收腰,法式) # 4. 适用人群:性别/年龄段等(例如:年轻女性) # 5. 使用场景 # 6. 适用季节 # 7. 关键属性 # 8. 材质说明 # 9. 功能特点 # 10. 商品卖点:分析和提取一句话核心卖点,用于推荐理由 # 11. 锚文本:生成一组能够代表该商品、并可能被用户用于搜索的词语或短语。这些词语应覆盖用户需求的各个维度,如品类、细分标签、功能特性、需求场景等等。 # 输入商品列表: # """ # prompt_tail = """ # 请严格按照以下markdown表格格式返回,每列内部的多值内容都用逗号分隔,不要添加任何其他说明: # | 序号 | 商品标题 | 品类路径 | 细分标签 | 适用人群 | 使用场景 | 适用季节 | 关键属性 | 材质说明 | 功能特点 | 商品卖点 | 锚文本 | # |----|----|----|----|----|----|----|----|----|----|----|----| # """ prompt = """Please analyze each input product title and extract the following information: 1. Product title: a natural English product name derived from the input title 2. Category path: from broad to fine-grained category, separated by ">" (e.g. Clothing>Women>Dresses>Work Dress) 3. Fine-grained tags: style / features / attributes (e.g. floral, waist-cinching, French style) 4. Target audience: gender / age group, etc. (e.g. young women) 5. Usage scene 6. Applicable season 7. Key attributes 8. Material description 9. Functional features 10. Selling point: one concise key selling sentence for recommendation 11. Anchor text: a set of words or phrases that could be used by users as search queries for this product, covering category, fine-grained tags, functional attributes, usage scenes, etc. Input product list: """ for idx, product in enumerate(products, 1): prompt += f'{idx}. {product["title"]}\n' if target_lang == "en": # 英文首轮:直接要求英文表头 + 英文内容 prompt += """ Please strictly return a Markdown table in the following format. For any column that can contain multiple values, separate values with commas. Do not add any other explanations: | No. | Product title | Category path | Fine-grained tags | Target audience | Usage scene | Season | Key attributes | Material | Features | Selling point | Anchor text | |----|----|----|----|----|----|----|----|----|----|----|----| """ else: # 非英文语言:视作“下一轮对话”,只做翻译,要求表头与内容全部用目标语言 prompt += f""" Now we will output the same table in {lang_name}. IMPORTANT: - Assume you have already generated the full table in English in a previous round. - In this round, you must output exactly the same table structure and content, but fully translated into {lang_name}, including ALL column headers and ALL cell values. - Do NOT change the meaning, fields, or the number/order of rows and columns. - Keep valid Markdown table syntax. Please return ONLY the Markdown table in {lang_name}, without any extra explanations. """ return prompt def call_llm(prompt: str, target_lang: str = "zh") -> Tuple[str, str]: """调用大模型API(带重试机制),按目标语言选择系统提示词。""" headers = { "Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json", } payload = { "model": MODEL_NAME, "messages": [ { "role": "system", "content": SYSTEM_MESSAGES, }, { "role": "user", "content": prompt, }, ], "temperature": 0.3, "top_p": 0.8, } request_data = { "headers": {k: v for k, v in headers.items() if k != "Authorization"}, "payload": payload, } # 主日志 + 详尽日志:LLM Request logger.info(f"\n{'=' * 80}") logger.info(f"LLM Request (Model: {MODEL_NAME}):") logger.info(json.dumps(request_data, ensure_ascii=False, indent=2)) logger.info(f"\nPrompt:\n{prompt}") verbose_logger.info(f"\n{'=' * 80}") verbose_logger.info(f"LLM Request (Model: {MODEL_NAME}):") verbose_logger.info(json.dumps(request_data, ensure_ascii=False, indent=2)) verbose_logger.info(f"\nPrompt:\n{prompt}") # 创建session,禁用代理 session = requests.Session() session.trust_env = False # 忽略系统代理设置 try: # 重试机制 for attempt in range(MAX_RETRIES): try: response = session.post( f"{API_BASE_URL}/chat/completions", headers=headers, json=payload, timeout=REQUEST_TIMEOUT, proxies={"http": None, "https": None}, # 明确禁用代理 ) response.raise_for_status() result = response.json() # 主日志 + 详尽日志:LLM Response logger.info(f"\nLLM Response:") logger.info(json.dumps(result, ensure_ascii=False, indent=2)) verbose_logger.info(f"\nLLM Response:") verbose_logger.info(json.dumps(result, ensure_ascii=False, indent=2)) content = result["choices"][0]["message"]["content"] logger.info(f"\nExtracted Content:\n{content}") verbose_logger.info(f"\nExtracted Content:\n{content}") return content, json.dumps(result, ensure_ascii=False) except requests.exceptions.ProxyError as e: logger.warning(f"Attempt {attempt + 1}/{MAX_RETRIES}: Proxy error - {str(e)}") if attempt < MAX_RETRIES - 1: logger.info(f"Retrying in {RETRY_DELAY} seconds...") time.sleep(RETRY_DELAY) else: raise except requests.exceptions.RequestException as e: logger.warning(f"Attempt {attempt + 1}/{MAX_RETRIES}: Request error - {str(e)}") if attempt < MAX_RETRIES - 1: logger.info(f"Retrying in {RETRY_DELAY} seconds...") time.sleep(RETRY_DELAY) else: raise except Exception as e: logger.error(f"Unexpected error on attempt {attempt + 1}/{MAX_RETRIES}: {str(e)}") if attempt < MAX_RETRIES - 1: logger.info(f"Retrying in {RETRY_DELAY} seconds...") time.sleep(RETRY_DELAY) else: raise finally: session.close() def parse_markdown_table(markdown_content: str) -> List[Dict[str, str]]: """解析markdown表格内容""" lines = markdown_content.strip().split("\n") data = [] data_started = False for line in lines: line = line.strip() if not line: continue # 表格行处理 if line.startswith("|"): # 分隔行(---- 或 :---: 等;允许空格,如 "| ---- | ---- |") sep_chars = line.replace("|", "").strip().replace(" ", "") if sep_chars and set(sep_chars) <= {"-", ":"}: data_started = True continue # 首个表头行:无论语言如何,统一跳过 if not data_started: # 等待下一行数据行 continue # 解析数据行 parts = [p.strip() for p in line.split("|")] parts = [p for p in parts if p] # 移除空字符串 if len(parts) >= 2: row = { "seq_no": parts[0], "title": parts[1], # 商品标题(按目标语言) "category_path": parts[2] if len(parts) > 2 else "", # 品类路径 "tags": parts[3] if len(parts) > 3 else "", # 细分标签 "target_audience": parts[4] if len(parts) > 4 else "", # 适用人群 "usage_scene": parts[5] if len(parts) > 5 else "", # 使用场景 "season": parts[6] if len(parts) > 6 else "", # 适用季节 "key_attributes": parts[7] if len(parts) > 7 else "", # 关键属性 "material": parts[8] if len(parts) > 8 else "", # 材质说明 "features": parts[9] if len(parts) > 9 else "", # 功能特点 "selling_points": parts[10] if len(parts) > 10 else "", # 商品卖点 "anchor_text": parts[11] if len(parts) > 11 else "", # 锚文本 } data.append(row) return data def process_batch( batch_data: List[Dict[str, str]], batch_num: int, target_lang: str = "zh", ) -> List[Dict[str, str]]: """处理一个批次的数据""" logger.info(f"\n{'#' * 80}") logger.info(f"Processing Batch {batch_num} ({len(batch_data)} items)") # 创建提示词 prompt = create_prompt(batch_data, target_lang=target_lang) # 调用LLM try: raw_response, full_response_json = call_llm(prompt, target_lang=target_lang) # 解析结果 parsed_results = parse_markdown_table(raw_response) logger.info(f"\nParsed Results ({len(parsed_results)} items):") logger.info(json.dumps(parsed_results, ensure_ascii=False, indent=2)) # 映射回原始ID results_with_ids = [] for i, parsed_item in enumerate(parsed_results): if i < len(batch_data): original_id = batch_data[i]["id"] result = { "id": original_id, "lang": target_lang, "title_input": batch_data[i]["title"], # 原始输入标题 "title": parsed_item.get("title", ""), # 模型生成的标题 "category_path": parsed_item.get("category_path", ""), # 品类路径 "tags": parsed_item.get("tags", ""), # 细分标签 "target_audience": parsed_item.get("target_audience", ""), # 适用人群 "usage_scene": parsed_item.get("usage_scene", ""), # 使用场景 "season": parsed_item.get("season", ""), # 适用季节 "key_attributes": parsed_item.get("key_attributes", ""), # 关键属性 "material": parsed_item.get("material", ""), # 材质说明 "features": parsed_item.get("features", ""), # 功能特点 "selling_points": parsed_item.get("selling_points", ""), # 商品卖点 "anchor_text": parsed_item.get("anchor_text", ""), # 锚文本 } results_with_ids.append(result) logger.info(f"Mapped: seq={parsed_item['seq_no']} -> original_id={original_id}") # 保存批次 JSON 日志到独立文件 batch_log = { "batch_num": batch_num, "timestamp": datetime.now().isoformat(), "input_products": batch_data, "raw_response": raw_response, "full_response_json": full_response_json, "parsed_results": parsed_results, "final_results": results_with_ids, } batch_log_file = LOG_DIR / f"batch_{batch_num:04d}_{timestamp}.json" with open(batch_log_file, "w", encoding="utf-8") as f: json.dump(batch_log, f, ensure_ascii=False, indent=2) logger.info(f"Batch log saved to: {batch_log_file}") return results_with_ids except Exception as e: logger.error(f"Error processing batch {batch_num}: {str(e)}", exc_info=True) # 返回空结果,保持ID映射 return [ { "id": item["id"], "lang": target_lang, "title_input": item["title"], "title": "", "category_path": "", "tags": "", "target_audience": "", "usage_scene": "", "season": "", "key_attributes": "", "material": "", "features": "", "selling_points": "", "anchor_text": "", "error": str(e), } for item in batch_data ] def analyze_products( products: List[Dict[str, str]], target_lang: str = "zh", batch_size: Optional[int] = None, tenant_id: Optional[str] = None, ) -> List[Dict[str, Any]]: """ 库调用入口:根据输入+语言,返回锚文本及各维度信息。 Args: products: [{"id": "...", "title": "..."}] target_lang: 输出语言 batch_size: 批大小,默认使用全局 BATCH_SIZE """ if not API_KEY: raise RuntimeError("DASHSCOPE_API_KEY is not set, cannot call LLM") if not products: return [] # 简单路径:索引阶段通常 batch_size=1,这里优先做单条缓存命中 if len(products) == 1: p = products[0] title = str(p.get("title") or "").strip() if title: cached = _get_cached_anchor_result(title, target_lang, tenant_id=tenant_id) if cached: logger.info( f"[analyze_products] Cache hit for title='{title[:50]}...', " f"lang={target_lang}, tenant_id={tenant_id or 'global'}" ) return [cached] # call_llm 一次处理上限固定为 BATCH_SIZE(默认 20): # - 尽可能攒批处理; # - 即便调用方传入更大的 batch_size,也会自动按上限拆批。 req_bs = BATCH_SIZE if batch_size is None else int(batch_size) bs = max(1, min(req_bs, BATCH_SIZE)) all_results: List[Dict[str, Any]] = [] total_batches = (len(products) + bs - 1) // bs for i in range(0, len(products), bs): batch_num = i // bs + 1 batch = products[i : i + bs] logger.info( f"[analyze_products] Processing batch {batch_num}/{total_batches}, " f"size={len(batch)}, target_lang={target_lang}" ) batch_results = process_batch(batch, batch_num=batch_num, target_lang=target_lang) all_results.extend(batch_results) # 写入缓存 for item in batch_results: title_input = str(item.get("title_input") or "").strip() if not title_input: continue if item.get("error"): # 不缓存错误结果,避免放大临时故障 continue try: _set_cached_anchor_result(title_input, target_lang, item, tenant_id=tenant_id) except Exception: # 已在内部记录 warning pass return all_results