"""
LLM-based translation backend (DashScope-compatible OpenAI API).

Failure semantics are strict:
- success: translated string
- failure: None
"""
  
from __future__ import annotations

import logging
import os
import time
from typing import Optional

from openai import OpenAI

from config.env_config import DASHSCOPE_API_KEY
from config.services_config import get_translation_config
from config.translate_prompts import TRANSLATION_PROMPTS, SOURCE_LANG_CODE_MAP

  
  logger = logging.getLogger(__name__)
  
  
a0a173ae   tangwang   last
26
  DEFAULT_QWEN_BASE_URL = "https://dashscope-us.aliyuncs.com/compatible-mode/v1"
d4cadc13   tangwang   翻译重构
27
  DEFAULT_LLM_MODEL = "qwen-flash"
a0a173ae   tangwang   last
28
29
30
31
  
  
  def _build_prompt(
      text: str,
d4cadc13   tangwang   翻译重构
32
33
      *,
      source_lang: Optional[str],
a0a173ae   tangwang   last
34
      target_lang: str,
d4cadc13   tangwang   翻译重构
35
      scene: Optional[str],
a0a173ae   tangwang   last
36
37
  ) -> str:
      """
d4cadc13   tangwang   翻译重构
38
39
40
41
       config.translate_prompts.TRANSLATION_PROMPTS 中构建提示词。
  
      要求:模板必须包含 {source_lang}{src_lang_code}{target_lang}{tgt_lang_code})。
      这里统一使用 code 作为占位的 lang  label,外部接口仍然只传语言 code
a0a173ae   tangwang   last
42
      """
d4cadc13   tangwang   翻译重构
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
      tgt = (target_lang or "").lower() or "en"
      src = (source_lang or "auto").lower()
  
      # 将业务上下文 scene 映射为模板分组名
      normalized_scene = (scene or "").strip() or "general"
      # 如果出现历史词,则报错,用于发现错误
      if normalized_scene in {"query", "ecommerce_search", "ecommerce_search_query"}:
          group_key = "ecommerce_search_query"
      elif normalized_scene in {"product_title", "sku_name"}:
          group_key = "sku_name"
      else:
          group_key = normalized_scene
      group = TRANSLATION_PROMPTS.get(group_key) or TRANSLATION_PROMPTS["general"]
  
      # 先按目标语言 code 取模板,取不到回退到英文
      template = group.get(tgt) or group.get("en")
      if not template:
          # 理论上不会发生,兜底一个简单模板
          template = (
              "You are a professional {source_lang} ({src_lang_code}) to "
              "{target_lang} ({tgt_lang_code}) translator, output only the translation: {text}"
          )
  
      # 目前不额外维护语言名称映射,直接使用 code 作为 label
      source_lang_label = SOURCE_LANG_CODE_MAP.get(src, src)
      target_lang_label = SOURCE_LANG_CODE_MAP.get(tgt, tgt)
  
a0a173ae   tangwang   last
70
71
      return template.format(
          source_lang=source_lang_label,
d4cadc13   tangwang   翻译重构
72
          src_lang_code=src,
a0a173ae   tangwang   last
73
          target_lang=target_lang_label,
d4cadc13   tangwang   翻译重构
74
          tgt_lang_code=tgt,
a0a173ae   tangwang   last
75
76
77
78
          text=text,
      )
  
  
class LLMTranslatorProvider:
    """Translation provider backed by a DashScope-compatible OpenAI chat API.

    Settings are resolved from the translation config's "llm" provider entry,
    with module defaults as the final fallback.  translate() returns the
    translated string on success and None on any failure.
    """

    def __init__(
        self,
        *,
        model: Optional[str] = None,
        timeout_sec: float = 30.0,
        base_url: Optional[str] = None,
    ) -> None:
        """Resolve model / timeout / base URL and create the API client.

        Args:
            model: explicit model name; overrides config and the default.
            timeout_sec: request timeout fallback in seconds.
            base_url: explicit API endpoint; overrides config/env/default.
        """
        cfg = get_translation_config()
        # Provider-specific settings live under the "llm" key; tolerate a
        # providers attribute that is not a dict.
        llm_cfg = cfg.providers.get("llm", {}) if isinstance(cfg.providers, dict) else {}
        self.model = model or llm_cfg.get("model") or DEFAULT_LLM_MODEL
        # NOTE(review): unlike `model`/`base_url`, a timeout_sec from config
        # takes precedence over the constructor argument — confirm this
        # inversion is intentional.
        self.timeout_sec = float(llm_cfg.get("timeout_sec") or timeout_sec or 30.0)
        self.base_url = (
            (base_url or "").strip()
            or (llm_cfg.get("base_url") or "").strip()
            or os.getenv("DASHSCOPE_BASE_URL")
            or DEFAULT_QWEN_BASE_URL
        )
        # May be None when no API key is configured; translate() then
        # degrades to returning None.
        self.client = self._create_client()

    def _create_client(self) -> Optional[OpenAI]:
        """Create the OpenAI-compatible client, or return None if unavailable."""
        api_key = DASHSCOPE_API_KEY or os.getenv("DASHSCOPE_API_KEY")
        if not api_key:
            logger.warning("DASHSCOPE_API_KEY not set; llm translation unavailable")
            return None
        try:
            return OpenAI(api_key=api_key, base_url=self.base_url)
        except Exception as exc:
            logger.error("Failed to initialize llm translation client: %s", exc, exc_info=True)
            return None

    def translate(
        self,
        text: str,
        target_lang: str,
        source_lang: Optional[str] = None,
        context: Optional[str] = None,
        prompt: Optional[str] = None,
    ) -> Optional[str]:
        """Translate *text* into *target_lang* via one chat completion call.

        Args:
            text: input text; blank input is returned unchanged.
            target_lang: target language code (defaults to "en" if falsy).
            source_lang: source language code; "auto" when not given.
            context: business scene forwarded to the prompt builder.
            prompt: pre-built prompt that bypasses _build_prompt entirely.

        Returns:
            The translated string, or None on failure (no client, empty
            model output, or API error).  Never raises.
        """
        if not text or not str(text).strip():
            return text
        if not self.client:
            return None

        tgt = (target_lang or "").lower() or "en"
        src = (source_lang or "auto").lower()
        # context doubles as the prompt "scene"; _build_prompt falls back to
        # its general template group for unknown scenes (including "default").
        scene = context or "default"
        user_prompt = prompt or _build_prompt(
            text=text,
            source_lang=src,
            target_lang=tgt,
            scene=scene,
        )
        start = time.time()
        try:
            logger.info(
                "[llm] Request | src=%s tgt=%s model=%s prompt=%s",
                src,
                tgt,
                self.model,
                user_prompt,
            )
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=[{"role": "user", "content": user_prompt}],
                timeout=self.timeout_sec,
            )
            content = (completion.choices[0].message.content or "").strip()
            latency_ms = (time.time() - start) * 1000
            if not content:
                # An empty completion counts as a failure, not as "".
                logger.warning("[llm] Empty result | src=%s tgt=%s latency=%.1fms", src, tgt, latency_ms)
                return None
            logger.info("[llm] Response | src=%s tgt=%s response=%s", src, tgt, content)
            logger.info("[llm] Success | src=%s tgt=%s latency=%.1fms", src, tgt, latency_ms)
            return content
        except Exception as exc:
            latency_ms = (time.time() - start) * 1000
            logger.warning(
                "[llm] Failed | src=%s tgt=%s latency=%.1fms error=%s",
                src,
                tgt,
                latency_ms,
                exc,
                exc_info=True,
            )
            return None
  
  
  def llm_translate(
      text: str,
      target_lang: str,
      *,
      source_lang: Optional[str] = None,
      source_lang_label: Optional[str] = None,
      target_lang_label: Optional[str] = None,
      timeout_sec: Optional[float] = None,
  ) -> Optional[str]:
d4cadc13   tangwang   翻译重构
176
177
      provider = LLMTranslatorProvider(timeout_sec=timeout_sec or 30.0)
      return provider.translate(
a0a173ae   tangwang   last
178
          text=text,
d4cadc13   tangwang   翻译重构
179
180
181
          target_lang=target_lang,
          source_lang=source_lang,
          context=None,
a0a173ae   tangwang   last
182
183
      )
  
# Public API surface of this module.
__all__ = ["LLMTranslatorProvider", "llm_translate"]