"""LLM-based translation backend."""
from __future__ import annotations
import logging
import time
from typing import List, Optional, Sequence, Union
from openai import OpenAI
from translation.languages import LANGUAGE_LABELS
from translation.prompts import TRANSLATION_PROMPTS
from translation.scenes import normalize_scene_name
logger = logging.getLogger(__name__)
def _build_prompt(
    text: str,
    *,
    source_lang: Optional[str],
    target_lang: str,
    scene: Optional[str],
) -> str:
    """Render the scene- and language-specific translation prompt for *text*.

    Language codes are normalized to lowercase; a missing/blank source falls
    back to "auto". The template is looked up by target language within the
    scene's prompt group, falling back to the "en" template.

    Raises:
        ValueError: when neither the target-language nor the "en" template
            exists for the normalized scene.
    """
    target_code = str(target_lang or "").strip().lower()
    source_code = str(source_lang or "auto").strip().lower() or "auto"
    scene_name = normalize_scene_name(scene)
    prompt_group = TRANSLATION_PROMPTS[scene_name]
    # `or` (not `is None`) so a falsy/empty template also falls back to "en",
    # matching the original lookup semantics.
    template = prompt_group.get(target_code) or prompt_group.get("en")
    if template is None:
        raise ValueError(f"Missing llm translation prompt for scene='{scene_name}' target_lang='{target_code}'")
    # Labels are human-readable names; unknown codes fall back to the raw code.
    return template.format(
        source_lang=LANGUAGE_LABELS.get(source_code, source_code),
        src_lang_code=source_code,
        target_lang=LANGUAGE_LABELS.get(target_code, target_code),
        tgt_lang_code=target_code,
        text=text,
    )
class LLMTranslationBackend:
    """Translation backend delegating to an OpenAI-compatible chat model.

    When no API key is configured (or client construction fails), the backend
    stays usable but every translation request resolves to ``None``.
    """

    def __init__(
        self,
        *,
        capability_name: str,
        model: str,
        timeout_sec: float,
        base_url: str,
        api_key: Optional[str],
    ) -> None:
        self.capability_name = capability_name
        self.model = model
        self.timeout_sec = float(timeout_sec)
        self.base_url = base_url
        self.api_key = api_key
        # Eagerly create the client; it is None when unavailable, and the
        # translation methods treat that as "always fail softly".
        self.client = self._create_client()

    @property
    def supports_batch(self) -> bool:
        """Sequences of texts are accepted (each item translated in turn)."""
        return True

    def _create_client(self) -> Optional[OpenAI]:
        """Return a configured OpenAI client, or None when one cannot be built."""
        if not self.api_key:
            logger.warning("DASHSCOPE_API_KEY not set; llm translation unavailable")
            return None
        try:
            return OpenAI(api_key=self.api_key, base_url=self.base_url)
        except Exception as exc:
            logger.error("Failed to initialize llm translation client: %s", exc, exc_info=True)
            return None

    def _translate_single(
        self,
        text: str,
        target_lang: str,
        source_lang: Optional[str] = None,
        scene: Optional[str] = None,
    ) -> Optional[str]:
        """Translate one string via the chat API.

        Returns the original value for blank input, and ``None`` when the
        client is unavailable, the model returns nothing, or the call fails.

        Raises:
            ValueError: when *scene* is None.
        """
        # Blank input is handed back untouched rather than sent to the model.
        if not text or not str(text).strip():
            return text
        if not self.client:
            return None
        target_code = str(target_lang or "").strip().lower()
        source_code = str(source_lang or "auto").strip().lower() or "auto"
        if scene is None:
            raise ValueError("llm translation scene is required")
        scene_name = normalize_scene_name(scene)
        user_prompt = _build_prompt(
            text=text,
            source_lang=source_code,
            target_lang=target_code,
            scene=scene_name,
        )
        started_at = time.time()
        try:
            logger.info(
                "[llm] Request | src=%s tgt=%s model=%s prompt=%s",
                source_code,
                target_code,
                self.model,
                user_prompt,
            )
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=[{"role": "user", "content": user_prompt}],
                timeout=self.timeout_sec,
            )
            answer = (completion.choices[0].message.content or "").strip()
            elapsed_ms = (time.time() - started_at) * 1000
            if not answer:
                logger.warning("[llm] Empty result | src=%s tgt=%s latency=%.1fms", source_code, target_code, elapsed_ms)
                return None
            logger.info(
                "[llm] Success | src=%s tgt=%s src_text=%s response=%s latency=%.1fms",
                source_code,
                target_code,
                text,
                answer,
                elapsed_ms,
            )
            return answer
        except Exception as exc:
            # Best-effort backend: log and report failure as None so callers
            # can fall back, rather than propagating transport/API errors.
            elapsed_ms = (time.time() - started_at) * 1000
            logger.warning(
                "[llm] Failed | src=%s tgt=%s latency=%.1fms error=%s",
                source_code,
                target_code,
                elapsed_ms,
                exc,
                exc_info=True,
            )
            return None

    def translate(
        self,
        text: Union[str, Sequence[str]],
        target_lang: str,
        source_lang: Optional[str] = None,
        scene: Optional[str] = None,
    ) -> Union[Optional[str], List[Optional[str]]]:
        """Translate one string, or each item of a list/tuple of strings.

        ``None`` entries in a sequence stay ``None``; every other entry is
        coerced to ``str`` and translated individually. Failed items come
        back as ``None`` in the result list.
        """
        if isinstance(text, (list, tuple)):
            return [
                None
                if entry is None
                else self._translate_single(
                    text=str(entry),
                    target_lang=target_lang,
                    source_lang=source_lang,
                    scene=scene,
                )
                for entry in text
            ]
        return self._translate_single(
            text=str(text),
            target_lang=target_lang,
            source_lang=source_lang,
            scene=scene,
        )