"""HTTP clients for search API, reranker, and DashScope chat (relevance labeling)."""
from __future__ import annotations
|
bdb65283
tangwang
标注框架 批量标注
|
5
6
|
import io
import json
|
cdd8ee3a
tangwang
eval框架日志独立
|
7
8
|
import logging
import threading
|
bdb65283
tangwang
标注框架 批量标注
|
9
10
|
import time
import uuid
|
c81b0fc1
tangwang
scripts/evaluatio...
|
11
12
13
14
|
from typing import Any, Dict, List, Optional, Sequence, Tuple
import requests
|
cdd8ee3a
tangwang
eval框架日志独立
|
15
16
17
|
from .constants import EVAL_VERBOSE_LOG_FILE, VALID_LABELS
from .logging_setup import setup_eval_logging
from .prompts import classify_prompt, intent_analysis_prompt
|
c81b0fc1
tangwang
scripts/evaluatio...
|
18
19
|
from .utils import build_label_doc_line, extract_json_blob, safe_json_dumps
|
cdd8ee3a
tangwang
eval框架日志独立
|
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
|
_VERBOSE_LOGGER_LOCK = threading.Lock()
_eval_llm_verbose_logger_singleton: logging.Logger | None = None
_eval_llm_verbose_path_logged = False
def _get_eval_llm_verbose_logger() -> logging.Logger:
"""File logger for full LLM prompts/responses → ``logs/verbose/eval_verbose.log``."""
setup_eval_logging()
global _eval_llm_verbose_logger_singleton, _eval_llm_verbose_path_logged
with _VERBOSE_LOGGER_LOCK:
if _eval_llm_verbose_logger_singleton is not None:
return _eval_llm_verbose_logger_singleton
log_path = EVAL_VERBOSE_LOG_FILE
log_path.parent.mkdir(parents=True, exist_ok=True)
lg = logging.getLogger("search_eval.verbose_llm")
lg.setLevel(logging.INFO)
if not lg.handlers:
handler = logging.FileHandler(log_path, encoding="utf-8")
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
lg.addHandler(handler)
lg.propagate = False
_eval_llm_verbose_logger_singleton = lg
if not _eval_llm_verbose_path_logged:
_eval_llm_verbose_path_logged = True
logging.getLogger("search_eval").info(
"LLM verbose I/O log (full prompt + response): %s",
log_path.resolve(),
)
return lg
def _log_eval_llm_verbose(
*,
phase: str,
model: str,
prompt: str,
assistant_text: str,
raw_response: str,
) -> None:
log = _get_eval_llm_verbose_logger()
sep = "=" * 80
log.info("\n%s", sep)
log.info("phase=%s model=%s", phase, model)
log.info("%s\nFULL PROMPT (user message)\n%s", sep, prompt)
log.info("%s\nASSISTANT CONTENT (parsed)\n%s", sep, assistant_text)
log.info("%s\nRAW RESPONSE (JSON string)\n%s", sep, raw_response)
log.info("%s\n", sep)
def _canonicalize_judge_label(raw: str) -> str | None:
    """Normalize one judge output line to a value from VALID_LABELS (case-insensitive), else None."""
    s = str(raw or "").strip().strip('"').strip("'")
    if s in VALID_LABELS:
        return s
    low = s.lower()
    for v in VALID_LABELS:
        if v.lower() == low:
            return v
    return None
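# Behaviour sketch (assumes, for illustration only, that VALID_LABELS contains "relevant"):
#
#     _canonicalize_judge_label(' "Relevant" ')   # -> "relevant"  (whitespace/quotes stripped, case-folded)
#     _canonicalize_judge_label("no_such_label")  # -> None        (caller skips the line)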
class SearchServiceClient:
    """Thin HTTP client for the search service's ``POST /search/`` endpoint (tenant-scoped via ``X-Tenant-ID``)."""

    def __init__(self, base_url: str, tenant_id: str):
        self.base_url = base_url.rstrip("/")
        self.tenant_id = str(tenant_id)
        self.session = requests.Session()

    def search(self, query: str, size: int, from_: int = 0, language: str = "en", *, debug: bool = False) -> Dict[str, Any]:
        payload: Dict[str, Any] = {
            "query": query,
            "size": size,
            "from": from_,
            "language": language,
        }
        if debug:
            payload["debug"] = True
        response = self.session.post(
            f"{self.base_url}/search/",
            headers={"Content-Type": "application/json", "X-Tenant-ID": self.tenant_id},
            json=payload,
            timeout=120,
        )
        response.raise_for_status()
        return response.json()
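# Usage sketch (illustrative only; the base URL and tenant id are placeholders, and the
# response shape depends on the search service — it is returned here as an opaque dict):
#
#     search_client = SearchServiceClient("http://localhost:8080", tenant_id="demo-tenant")
#     result = search_client.search("wireless earbuds", size=10, language="en", debug=True)
#     print(list(result.keys()))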
class RerankServiceClient:
    """Thin HTTP client for a reranker endpoint that scores docs against a query."""

    def __init__(self, service_url: str):
        self.service_url = service_url.rstrip("/")
        self.session = requests.Session()

    def rerank(self, query: str, docs: Sequence[str], normalize: bool = False, top_n: Optional[int] = None) -> Tuple[List[float], Dict[str, Any]]:
        payload: Dict[str, Any] = {
            "query": query,
            "docs": list(docs),
            "normalize": normalize,
        }
        if top_n is not None:
            payload["top_n"] = int(top_n)
        response = self.session.post(self.service_url, json=payload, timeout=180)
        response.raise_for_status()
        data = response.json()
        return list(data.get("scores") or []), dict(data.get("meta") or {})
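# Usage sketch (illustrative only; the reranker URL is a placeholder — the payload is
# POSTed to service_url as-is, and the response is expected to carry "scores" and "meta"):
#
#     rerank_client = RerankServiceClient("http://localhost:9000/rerank")
#     scores, meta = rerank_client.rerank("wireless earbuds", ["doc one", "doc two"], normalize=True)
#     # scores: the list returned under "scores"; meta: service-reported metadata.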
class DashScopeLabelClient:
    """DashScope OpenAI-compatible chat: synchronous or Batch File API (JSONL job).

    Batch flow: https://help.aliyun.com/zh/model-studio/batch-interfaces-compatible-with-openai/

    Some regional endpoints (e.g. ``dashscope-us`` compatible-mode) do not implement ``/batches``;
    on HTTP 404 from batch calls we fall back to synchronous ``/chat/completions`` and stop using
    batch for subsequent requests on this client.
    """

    def __init__(
        self,
        model: str,
        base_url: str,
        api_key: str,
        batch_size: int = 40,
        *,
        batch_completion_window: str = "24h",
        batch_poll_interval_sec: float = 10.0,
        enable_thinking: bool = True,
        use_batch: bool = False,
    ):
        self.model = model
        self.base_url = base_url.rstrip("/")
        self.api_key = api_key
        self.batch_size = int(batch_size)
        self.batch_completion_window = str(batch_completion_window)
        self.batch_poll_interval_sec = float(batch_poll_interval_sec)
        self.enable_thinking = bool(enable_thinking)
        self.use_batch = bool(use_batch)
        self.session = requests.Session()

    def _auth_headers(self) -> Dict[str, str]:
        return {"Authorization": f"Bearer {self.api_key}"}

    def _completion_body(self, prompt: str) -> Dict[str, Any]:
        body: Dict[str, Any] = {
            "model": self.model,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0,
            "top_p": 0.1,
            "enable_thinking": self.enable_thinking,
        }
        return body
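    # Example of the request body built above (illustrative values; "qwen-plus" is only a
    # placeholder model name — the real value comes from the constructor):
    #
    #     {
    #         "model": "qwen-plus",
    #         "messages": [{"role": "user", "content": "<prompt text>"}],
    #         "temperature": 0,
    #         "top_p": 0.1,
    #         "enable_thinking": True,
    #     }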
    def _chat_sync(self, prompt: str) -> Tuple[str, str]:
        """One synchronous chat completion; returns (assistant content, raw response JSON string)."""
        response = self.session.post(
            f"{self.base_url}/chat/completions",
            headers={**self._auth_headers(), "Content-Type": "application/json"},
            json=self._completion_body(prompt),
            timeout=180,
        )
        response.raise_for_status()
        data = response.json()
        content = str(((data.get("choices") or [{}])[0].get("message") or {}).get("content") or "").strip()
        return content, safe_json_dumps(data)
    def _chat_batch(self, prompt: str) -> Tuple[str, str]:
        """One chat completion via Batch File API (single-line JSONL job)."""
        custom_id = uuid.uuid4().hex
        body = self._completion_body(prompt)
        line_obj = {
            "custom_id": custom_id,
            "method": "POST",
            "url": "/v1/chat/completions",
            "body": body,
        }
        jsonl = json.dumps(line_obj, ensure_ascii=False, separators=(",", ":")) + "\n"
        auth = self._auth_headers()
        # 1) Upload the single-line JSONL input file with purpose="batch".
        up = self.session.post(
            f"{self.base_url}/files",
            headers=auth,
            files={
                "file": (
                    "eval_batch_input.jsonl",
                    io.BytesIO(jsonl.encode("utf-8")),
                    "application/octet-stream",
                )
            },
            data={"purpose": "batch"},
            timeout=300,
        )
        up.raise_for_status()
        file_id = (up.json() or {}).get("id")
        if not file_id:
            raise RuntimeError(f"DashScope file upload returned no id: {up.text!r}")
        # 2) Create the batch job against /v1/chat/completions.
        cr = self.session.post(
            f"{self.base_url}/batches",
            headers={**auth, "Content-Type": "application/json"},
            json={
                "input_file_id": file_id,
                "endpoint": "/v1/chat/completions",
                "completion_window": self.batch_completion_window,
            },
            timeout=120,
        )
        cr.raise_for_status()
        batch_payload = cr.json() or {}
        batch_id = batch_payload.get("id")
        if not batch_id:
            raise RuntimeError(f"DashScope batches.create returned no id: {cr.text!r}")
        # 3) Poll until the job reaches a terminal status.
        terminal = frozenset({"completed", "failed", "expired", "cancelled"})
        batch: Dict[str, Any] = dict(batch_payload)
        status = str(batch.get("status") or "")
        while status not in terminal:
            time.sleep(self.batch_poll_interval_sec)
            br = self.session.get(f"{self.base_url}/batches/{batch_id}", headers=auth, timeout=120)
            br.raise_for_status()
            batch = br.json() or {}
            status = str(batch.get("status") or "")
        if status != "completed":
            raise RuntimeError(
                f"DashScope batch {batch_id} ended with status={status!r} errors={batch.get('errors')!r}"
            )
        # 4) Fetch the output (or error) line matching our custom_id.
        out_id = batch.get("output_file_id")
        err_id = batch.get("error_file_id")
        row = self._find_batch_line_for_custom_id(out_id, custom_id, auth)
        if row is None:
            err_row = self._find_batch_line_for_custom_id(err_id, custom_id, auth)
            if err_row is not None:
                raise RuntimeError(f"DashScope batch request failed: {err_row!r}")
            raise RuntimeError(f"DashScope batch output missing custom_id={custom_id!r}")
        resp = row.get("response") or {}
        sc = resp.get("status_code")
        if sc is not None and int(sc) != 200:
            raise RuntimeError(f"DashScope batch line error: {row!r}")
        data = resp.get("body") or {}
        content = str(((data.get("choices") or [{}])[0].get("message") or {}).get("content") or "").strip()
        return content, safe_json_dumps(row)
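    # Shape sketch of the batch JSONL lines (request line as built above; result line as
    # implied by the parsing above — real payloads carry additional fields):
    #
    #     request: {"custom_id": "<hex>", "method": "POST", "url": "/v1/chat/completions", "body": {...}}
    #     result:  {"custom_id": "<hex>", "response": {"status_code": 200,
    #               "body": {"choices": [{"message": {"content": "..."}}]}}}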
    def _chat(self, prompt: str, *, phase: str = "chat") -> Tuple[str, str]:
        if not self.use_batch:
            content, raw = self._chat_sync(prompt)
        else:
            try:
                content, raw = self._chat_batch(prompt)
            except requests.exceptions.HTTPError as e:
                resp = getattr(e, "response", None)
                if resp is not None and resp.status_code == 404:
                    # Endpoint has no /batches support: fall back to sync and stay sync.
                    self.use_batch = False
                    content, raw = self._chat_sync(prompt)
                else:
                    raise
        _log_eval_llm_verbose(
            phase=phase,
            model=self.model,
            prompt=prompt,
            assistant_text=content,
            raw_response=raw,
        )
        return content, raw
    def _find_batch_line_for_custom_id(
        self,
        file_id: Optional[str],
        custom_id: str,
        auth: Dict[str, str],
    ) -> Optional[Dict[str, Any]]:
        """Scan a batch output/error JSONL file for the line whose custom_id matches."""
        if not file_id or str(file_id) in ("null", ""):
            return None
        r = self.session.get(f"{self.base_url}/files/{file_id}/content", headers=auth, timeout=300)
        r.raise_for_status()
        for raw in r.text.splitlines():
            raw = raw.strip()
            if not raw:
                continue
            try:
                obj = json.loads(raw)
            except json.JSONDecodeError:
                continue
            if str(obj.get("custom_id")) == custom_id:
                return obj
        return None
    def query_intent(self, query: str) -> Tuple[str, str]:
        prompt = intent_analysis_prompt(query)
        return self._chat(prompt, phase="query_intent")
    def classify_batch(
        self,
        query: str,
        docs: Sequence[Dict[str, Any]],
        *,
        query_intent_block: str = "",
    ) -> Tuple[List[str], str]:
        """Label every doc for relevance to ``query``; returns (labels, raw judge response)."""
        numbered_docs = [build_label_doc_line(idx + 1, doc) for idx, doc in enumerate(docs)]
        prompt = classify_prompt(query, numbered_docs, query_intent_block=query_intent_block)
        content, raw_response = self._chat(prompt, phase="relevance_classify")
        # Primary parse: one label per output line.
        labels: List[str] = []
        for line in str(content or "").splitlines():
            canon = _canonicalize_judge_label(line)
            if canon is not None:
                labels.append(canon)
        # Fallback parse: a JSON blob with a "labels" list (strings or {"label": ...} dicts).
        if len(labels) != len(docs):
            payload = extract_json_blob(content)
            if isinstance(payload, dict) and isinstance(payload.get("labels"), list):
                labels = []
                for item in payload["labels"][: len(docs)]:
                    if isinstance(item, dict):
                        raw_l = str(item.get("label") or "").strip()
                    else:
                        raw_l = str(item).strip()
                    canon = _canonicalize_judge_label(raw_l)
                    if canon is not None:
                        labels.append(canon)
        if len(labels) != len(docs) or any(label not in VALID_LABELS for label in labels):
            raise ValueError(f"unexpected classify output: {content!r}")
        return labels, raw_response
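# End-to-end usage sketch (illustrative only; the base URL, API_KEY, model name, and
# candidate_docs are placeholders — docs must be in the shape build_label_doc_line expects,
# and the intent text may need formatting before being passed as query_intent_block):
#
#     labeler = DashScopeLabelClient(
#         model="qwen-plus",
#         base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
#         api_key=API_KEY,
#         use_batch=False,  # synchronous /chat/completions
#     )
#     intent, _ = labeler.query_intent("wireless earbuds under $50")
#     labels, raw = labeler.classify_batch(
#         "wireless earbuds under $50",
#         docs=candidate_docs,
#         query_intent_block=intent,
#     )
#     # labels[i] is the judged relevance label for candidate_docs[i], drawn from VALID_LABELS.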