"""
Embedding service (FastAPI).
API (simple list-in, list-out; aligned by index):
- POST /embed/text body: ["text1", "text2", ...] -> [[...], ...]
- POST /embed/image body: ["url_or_path1", ...] -> [[...], ...]
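
Illustrative call (the host/port and the ``requests`` client are assumptions about
the deployment, not something this module defines):

    import requests

    vecs = requests.post(
        "http://localhost:8000/embed/text?normalize=true",
        json=["first text", "second text"],
    ).json()
    # vecs[0] / vecs[1] are the embeddings for the two inputs, aligned by index.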
"""
import logging
import os
import pathlib
import threading
import time
import uuid
from collections import deque
from dataclasses import dataclass
from logging.handlers import TimedRotatingFileHandler
from typing import Any, Dict, List, Optional
import numpy as np
from fastapi import FastAPI, HTTPException, Request, Response
from fastapi.concurrency import run_in_threadpool
from config.env_config import REDIS_CONFIG
from config.services_config import get_embedding_backend_config
from embeddings.cache_keys import build_image_cache_key, build_text_cache_key
from embeddings.config import CONFIG
from embeddings.protocols import ImageEncoderProtocol
from embeddings.redis_embedding_cache import RedisEmbeddingCache
app = FastAPI(title="saas-search Embedding Service", version="1.0.0")
class _DefaultRequestIdFilter(logging.Filter):
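    """Logging filter that backfills a default ``reqid`` on records so the formatter never fails."""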
def filter(self, record: logging.LogRecord) -> bool:
if not hasattr(record, "reqid"):
record.reqid = "-1"
return True


def configure_embedding_logging() -> None:
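    """Attach rotating api/error/verbose file handlers; guarded so repeat calls are no-ops."""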
root_logger = logging.getLogger()
if getattr(root_logger, "_embedding_logging_configured", False):
return
log_dir = pathlib.Path("logs")
verbose_dir = log_dir / "verbose"
log_dir.mkdir(exist_ok=True)
verbose_dir.mkdir(parents=True, exist_ok=True)
log_level = os.getenv("LOG_LEVEL", "INFO").upper()
numeric_level = getattr(logging, log_level, logging.INFO)
formatter = logging.Formatter(
"%(asctime)s | reqid:%(reqid)s | %(name)s | %(levelname)s | %(message)s"
)
request_filter = _DefaultRequestIdFilter()
root_logger.setLevel(numeric_level)
file_handler = TimedRotatingFileHandler(
filename=log_dir / "embedding_api.log",
when="midnight",
interval=1,
backupCount=30,
encoding="utf-8",
)
file_handler.setLevel(numeric_level)
file_handler.setFormatter(formatter)
file_handler.addFilter(request_filter)
root_logger.addHandler(file_handler)
error_handler = TimedRotatingFileHandler(
filename=log_dir / "embedding_api_error.log",
when="midnight",
interval=1,
backupCount=30,
encoding="utf-8",
)
error_handler.setLevel(logging.ERROR)
error_handler.setFormatter(formatter)
error_handler.addFilter(request_filter)
root_logger.addHandler(error_handler)
verbose_logger = logging.getLogger("embedding.verbose")
verbose_logger.setLevel(numeric_level)
verbose_logger.handlers.clear()
verbose_logger.propagate = False
verbose_handler = TimedRotatingFileHandler(
filename=verbose_dir / "embedding_verbose.log",
when="midnight",
interval=1,
backupCount=30,
encoding="utf-8",
)
verbose_handler.setLevel(numeric_level)
verbose_handler.setFormatter(formatter)
verbose_handler.addFilter(request_filter)
verbose_logger.addHandler(verbose_handler)
root_logger._embedding_logging_configured = True # type: ignore[attr-defined]


configure_embedding_logging()
logger = logging.getLogger(__name__)
verbose_logger = logging.getLogger("embedding.verbose")
# Models are loaded at startup, not lazily
_text_model: Optional[Any] = None
_image_model: Optional[ImageEncoderProtocol] = None
_text_backend_name: str = ""
_SERVICE_KIND = (os.getenv("EMBEDDING_SERVICE_KIND", "all") or "all").strip().lower()
if _SERVICE_KIND not in {"all", "text", "image"}:
raise RuntimeError(
f"Invalid EMBEDDING_SERVICE_KIND={_SERVICE_KIND!r}; expected all, text, or image"
)
_TEXT_ENABLED_BY_ENV = os.getenv("EMBEDDING_ENABLE_TEXT_MODEL", "true").lower() in ("1", "true", "yes")
_IMAGE_ENABLED_BY_ENV = os.getenv("EMBEDDING_ENABLE_IMAGE_MODEL", "true").lower() in ("1", "true", "yes")
open_text_model = _TEXT_ENABLED_BY_ENV and _SERVICE_KIND in {"all", "text"}
open_image_model = _IMAGE_ENABLED_BY_ENV and _SERVICE_KIND in {"all", "image"}
_text_encode_lock = threading.Lock()
_image_encode_lock = threading.Lock()
_TEXT_MICROBATCH_WINDOW_SEC = max(
0.0, float(os.getenv("TEXT_MICROBATCH_WINDOW_MS", "4")) / 1000.0
)
_TEXT_REQUEST_TIMEOUT_SEC = max(
1.0, float(os.getenv("TEXT_REQUEST_TIMEOUT_SEC", "30"))
)
_TEXT_MAX_INFLIGHT = max(1, int(os.getenv("TEXT_MAX_INFLIGHT", "32")))
_IMAGE_MAX_INFLIGHT = max(1, int(os.getenv("IMAGE_MAX_INFLIGHT", "1")))
_OVERLOAD_STATUS_CODE = int(os.getenv("EMBEDDING_OVERLOAD_STATUS_CODE", "503"))
_LOG_PREVIEW_COUNT = max(1, int(os.getenv("EMBEDDING_LOG_PREVIEW_COUNT", "3")))
_LOG_TEXT_PREVIEW_CHARS = max(32, int(os.getenv("EMBEDDING_LOG_TEXT_PREVIEW_CHARS", "120")))
_LOG_IMAGE_PREVIEW_CHARS = max(32, int(os.getenv("EMBEDDING_LOG_IMAGE_PREVIEW_CHARS", "180")))
_VECTOR_PREVIEW_DIMS = max(1, int(os.getenv("EMBEDDING_VECTOR_PREVIEW_DIMS", "6")))
_CACHE_PREFIX = str(REDIS_CONFIG.get("embedding_cache_prefix", "embedding")).strip() or "embedding"


@dataclass
class _EmbedResult:
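    """Embedding vectors (aligned to the request order) plus cache-hit and backend-timing stats."""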
vectors: List[Optional[List[float]]]
cache_hits: int
cache_misses: int
backend_elapsed_ms: float
mode: str


class _EndpointStats:
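    """Thread-safe request, latency, and cache counters for one endpoint (text or image)."""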
def __init__(self, name: str):
self.name = name
self._lock = threading.Lock()
self.request_total = 0
self.success_total = 0
self.failure_total = 0
self.rejected_total = 0
self.cache_hits = 0
self.cache_misses = 0
self.total_latency_ms = 0.0
self.total_backend_latency_ms = 0.0
def record_rejected(self) -> None:
with self._lock:
self.request_total += 1
self.rejected_total += 1
def record_completed(
self,
*,
success: bool,
latency_ms: float,
backend_latency_ms: float,
cache_hits: int,
cache_misses: int,
) -> None:
with self._lock:
self.request_total += 1
if success:
self.success_total += 1
else:
self.failure_total += 1
self.cache_hits += max(0, int(cache_hits))
self.cache_misses += max(0, int(cache_misses))
self.total_latency_ms += max(0.0, float(latency_ms))
self.total_backend_latency_ms += max(0.0, float(backend_latency_ms))
def snapshot(self) -> Dict[str, Any]:
with self._lock:
completed = self.success_total + self.failure_total
return {
"request_total": self.request_total,
"success_total": self.success_total,
"failure_total": self.failure_total,
"rejected_total": self.rejected_total,
"cache_hits": self.cache_hits,
"cache_misses": self.cache_misses,
"avg_latency_ms": round(self.total_latency_ms / completed, 3) if completed else 0.0,
"avg_backend_latency_ms": round(self.total_backend_latency_ms / completed, 3)
if completed
else 0.0,
}
class _InflightLimiter:
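    """Non-blocking concurrency cap: work beyond ``limit`` in-flight requests is rejected, not queued."""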
def __init__(self, name: str, limit: int):
self.name = name
self.limit = max(1, int(limit))
self._sem = threading.BoundedSemaphore(self.limit)
self._lock = threading.Lock()
self._active = 0
self._rejected = 0
self._completed = 0
self._failed = 0
self._max_active = 0
def try_acquire(self) -> tuple[bool, int]:
if not self._sem.acquire(blocking=False):
with self._lock:
self._rejected += 1
active = self._active
return False, active
with self._lock:
self._active += 1
self._max_active = max(self._max_active, self._active)
active = self._active
return True, active
def release(self, *, success: bool) -> int:
with self._lock:
self._active = max(0, self._active - 1)
if success:
self._completed += 1
else:
self._failed += 1
active = self._active
self._sem.release()
return active
def snapshot(self) -> Dict[str, int]:
with self._lock:
return {
"limit": self.limit,
"active": self._active,
"rejected_total": self._rejected,
"completed_total": self._completed,
"failed_total": self._failed,
"max_active": self._max_active,
}


_text_request_limiter = _InflightLimiter(name="text", limit=_TEXT_MAX_INFLIGHT)
_image_request_limiter = _InflightLimiter(name="image", limit=_IMAGE_MAX_INFLIGHT)
_text_stats = _EndpointStats(name="text")
_image_stats = _EndpointStats(name="image")
_text_cache = RedisEmbeddingCache(key_prefix=_CACHE_PREFIX, namespace="")
_image_cache = RedisEmbeddingCache(key_prefix=_CACHE_PREFIX, namespace="image")


@dataclass
class _SingleTextTask:
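    """One queued single-text encode request; ``done`` is set by the micro-batch worker."""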
text: str
normalize: bool
created_at: float
request_id: str
done: threading.Event
result: Optional[List[float]] = None
error: Optional[Exception] = None


_text_single_queue: "deque[_SingleTextTask]" = deque()
_text_single_queue_cv = threading.Condition()
_text_batch_worker: Optional[threading.Thread] = None
_text_batch_worker_stop = False
def _compact_preview(text: str, max_chars: int) -> str:
compact = " ".join((text or "").split())
if len(compact) <= max_chars:
return compact
return compact[:max_chars] + "..."


def _preview_inputs(items: List[str], max_items: int, max_chars: int) -> List[Dict[str, Any]]:
previews: List[Dict[str, Any]] = []
for idx, item in enumerate(items[:max_items]):
previews.append(
{
"idx": idx,
"len": len(item),
"preview": _compact_preview(item, max_chars),
}
)
return previews


def _preview_vector(vec: Optional[List[float]], max_dims: int = _VECTOR_PREVIEW_DIMS) -> List[float]:
if not vec:
return []
return [round(float(v), 6) for v in vec[:max_dims]]


def _request_log_extra(request_id: str) -> Dict[str, str]:
return {"reqid": request_id}


def _resolve_request_id(http_request: Request) -> str:
header_value = http_request.headers.get("X-Request-ID")
if header_value and header_value.strip():
return header_value.strip()[:32]
return str(uuid.uuid4())[:8]


def _request_client(http_request: Request) -> str:
client = getattr(http_request, "client", None)
host = getattr(client, "host", None)
return str(host or "-")


def _encode_local_st(texts: List[str], normalize_embeddings: bool) -> Any:
with _text_encode_lock:
return _text_model.encode(
texts,
batch_size=int(CONFIG.TEXT_BATCH_SIZE),
device=CONFIG.TEXT_DEVICE,
normalize_embeddings=normalize_embeddings,
)


def _start_text_batch_worker() -> None:
global _text_batch_worker, _text_batch_worker_stop
if _text_batch_worker is not None and _text_batch_worker.is_alive():
return
_text_batch_worker_stop = False
_text_batch_worker = threading.Thread(
target=_text_batch_worker_loop,
name="embed-text-microbatch-worker",
daemon=True,
)
_text_batch_worker.start()
logger.info(
"Started local_st text micro-batch worker | window_ms=%.1f max_batch=%d",
_TEXT_MICROBATCH_WINDOW_SEC * 1000.0,
int(CONFIG.TEXT_BATCH_SIZE),
)


def _stop_text_batch_worker() -> None:
global _text_batch_worker_stop
with _text_single_queue_cv:
_text_batch_worker_stop = True
_text_single_queue_cv.notify_all()


def _text_batch_worker_loop() -> None:
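    """Pop one task, wait up to the micro-batch window for more, then encode the whole batch."""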
max_batch = max(1, int(CONFIG.TEXT_BATCH_SIZE))
while True:
with _text_single_queue_cv:
while not _text_single_queue and not _text_batch_worker_stop:
_text_single_queue_cv.wait()
if _text_batch_worker_stop:
return
batch: List[_SingleTextTask] = [_text_single_queue.popleft()]
deadline = time.perf_counter() + _TEXT_MICROBATCH_WINDOW_SEC
while len(batch) < max_batch:
remaining = deadline - time.perf_counter()
if remaining <= 0:
break
if not _text_single_queue:
_text_single_queue_cv.wait(timeout=remaining)
continue
while _text_single_queue and len(batch) < max_batch:
batch.append(_text_single_queue.popleft())
try:
queue_wait_ms = [(time.perf_counter() - task.created_at) * 1000.0 for task in batch]
reqids = [task.request_id for task in batch]
logger.info(
"text microbatch dispatch | size=%d queue_wait_ms_min=%.2f queue_wait_ms_max=%.2f reqids=%s preview=%s",
len(batch),
min(queue_wait_ms) if queue_wait_ms else 0.0,
max(queue_wait_ms) if queue_wait_ms else 0.0,
reqids,
_preview_inputs(
[task.text for task in batch],
_LOG_PREVIEW_COUNT,
_LOG_TEXT_PREVIEW_CHARS,
),
)
batch_t0 = time.perf_counter()
embs = _encode_local_st([task.text for task in batch], normalize_embeddings=False)
if embs is None or len(embs) != len(batch):
raise RuntimeError(
f"Text model response length mismatch in micro-batch: "
f"expected {len(batch)}, got {0 if embs is None else len(embs)}"
)
for task, emb in zip(batch, embs):
vec = _as_list(emb, normalize=task.normalize)
if vec is None:
raise RuntimeError("Text model returned empty embedding in micro-batch")
task.result = vec
logger.info(
"text microbatch done | size=%d reqids=%s dim=%d backend_elapsed_ms=%.2f",
len(batch),
reqids,
len(batch[0].result) if batch and batch[0].result is not None else 0,
(time.perf_counter() - batch_t0) * 1000.0,
)
except Exception as exc:
logger.error(
"text microbatch failed | size=%d reqids=%s error=%s",
len(batch),
[task.request_id for task in batch],
exc,
exc_info=True,
)
for task in batch:
task.error = exc
finally:
for task in batch:
task.done.set()


def _encode_single_text_with_microbatch(text: str, normalize: bool, request_id: str) -> List[float]:
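    """Queue one text for the micro-batch worker and block until its vector is ready (or timeout)."""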
task = _SingleTextTask(
text=text,
normalize=normalize,
created_at=time.perf_counter(),
request_id=request_id,
done=threading.Event(),
)
with _text_single_queue_cv:
_text_single_queue.append(task)
_text_single_queue_cv.notify()
if not task.done.wait(timeout=_TEXT_REQUEST_TIMEOUT_SEC):
with _text_single_queue_cv:
try:
_text_single_queue.remove(task)
except ValueError:
pass
raise RuntimeError(
f"Timed out waiting for text micro-batch worker ({_TEXT_REQUEST_TIMEOUT_SEC:.1f}s)"
)
if task.error is not None:
raise task.error
if task.result is None:
raise RuntimeError("Text micro-batch worker returned empty result")
return task.result


@app.on_event("startup")
def load_models():
"""Load models at service startup to avoid first-request latency."""
global _text_model, _image_model, _text_backend_name
logger.info(
"Loading embedding models at startup | service_kind=%s text_enabled=%s image_enabled=%s",
_SERVICE_KIND,
open_text_model,
open_image_model,
)
if open_text_model:
try:
backend_name, backend_cfg = get_embedding_backend_config()
_text_backend_name = backend_name
if backend_name == "tei":
from embeddings.text_embedding_tei import TEITextModel
base_url = (
os.getenv("TEI_BASE_URL")
or backend_cfg.get("base_url")
or CONFIG.TEI_BASE_URL
)
timeout_sec = int(
os.getenv("TEI_TIMEOUT_SEC")
or backend_cfg.get("timeout_sec")
or CONFIG.TEI_TIMEOUT_SEC
)
logger.info("Loading text backend: tei (base_url=%s)", base_url)
_text_model = TEITextModel(
base_url=str(base_url),
timeout_sec=timeout_sec,
)
elif backend_name == "local_st":
from embeddings.text_embedding_sentence_transformers import Qwen3TextModel
model_id = (
os.getenv("TEXT_MODEL_ID")
or backend_cfg.get("model_id")
or CONFIG.TEXT_MODEL_ID
)
logger.info("Loading text backend: local_st (model=%s)", model_id)
_text_model = Qwen3TextModel(model_id=str(model_id))
_start_text_batch_worker()
else:
raise ValueError(
f"Unsupported embedding backend: {backend_name}. "
"Supported: tei, local_st"
)
logger.info("Text backend loaded successfully: %s", _text_backend_name)
except Exception as e:
logger.error("Failed to load text model: %s", e, exc_info=True)
raise
if open_image_model:
try:
if CONFIG.USE_CLIP_AS_SERVICE:
from embeddings.clip_as_service_encoder import ClipAsServiceImageEncoder
logger.info(
"Loading image encoder via clip-as-service: %s (configured model: %s)",
CONFIG.CLIP_AS_SERVICE_SERVER,
CONFIG.CLIP_AS_SERVICE_MODEL_NAME,
)
_image_model = ClipAsServiceImageEncoder(
server=CONFIG.CLIP_AS_SERVICE_SERVER,
batch_size=CONFIG.IMAGE_BATCH_SIZE,
)
logger.info("Image model (clip-as-service) loaded successfully")
else:
from embeddings.clip_model import ClipImageModel
logger.info(
"Loading local image model: %s (device: %s)",
CONFIG.IMAGE_MODEL_NAME,
CONFIG.IMAGE_DEVICE,
)
_image_model = ClipImageModel(
model_name=CONFIG.IMAGE_MODEL_NAME,
device=CONFIG.IMAGE_DEVICE,
)
logger.info("Image model (local CN-CLIP) loaded successfully")
except Exception as e:
logger.error("Failed to load image model: %s", e, exc_info=True)
raise
logger.info("All embedding models loaded successfully, service ready")


@app.on_event("shutdown")
def stop_workers() -> None:
_stop_text_batch_worker()


def _normalize_vector(vec: np.ndarray) -> np.ndarray:
norm = float(np.linalg.norm(vec))
if not np.isfinite(norm) or norm <= 0.0:
raise RuntimeError("Embedding vector has invalid norm (must be > 0)")
return vec / norm


def _as_list(embedding: Optional[np.ndarray], normalize: bool = False) -> Optional[List[float]]:
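    """Coerce an embedding to a flat float32 list, optionally L2-normalizing; None passes through."""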
if embedding is None:
return None
if not isinstance(embedding, np.ndarray):
embedding = np.array(embedding, dtype=np.float32)
if embedding.ndim != 1:
embedding = embedding.reshape(-1)
embedding = embedding.astype(np.float32, copy=False)
if normalize:
embedding = _normalize_vector(embedding).astype(np.float32, copy=False)
return embedding.tolist()


def _try_full_text_cache_hit(
normalized: List[str],
effective_normalize: bool,
) -> Optional[_EmbedResult]:
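    """Return a cache-only result iff every text is already in the Redis cache; otherwise None."""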
out: List[Optional[List[float]]] = []
for text in normalized:
cached = _text_cache.get(build_text_cache_key(text, normalize=effective_normalize))
if cached is None:
return None
vec = _as_list(cached, normalize=False)
if vec is None:
return None
out.append(vec)
return _EmbedResult(
vectors=out,
cache_hits=len(out),
cache_misses=0,
backend_elapsed_ms=0.0,
mode="cache-only",
)


def _try_full_image_cache_hit(
urls: List[str],
effective_normalize: bool,
) -> Optional[_EmbedResult]:
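    """Return a cache-only result iff every image URL is already in the Redis cache; otherwise None."""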
out: List[Optional[List[float]]] = []
for url in urls:
cached = _image_cache.get(build_image_cache_key(url, normalize=effective_normalize))
if cached is None:
return None
vec = _as_list(cached, normalize=False)
if vec is None:
return None
out.append(vec)
return _EmbedResult(
vectors=out,
cache_hits=len(out),
cache_misses=0,
backend_elapsed_ms=0.0,
mode="cache-only",
)


@app.get("/health")
def health() -> Dict[str, Any]:
"""Health check endpoint. Returns status and current throttling stats."""
ready = (not open_text_model or _text_model is not None) and (not open_image_model or _image_model is not None)
return {
"status": "ok" if ready else "degraded",
"service_kind": _SERVICE_KIND,
"text_model_loaded": _text_model is not None,
"text_backend": _text_backend_name,
"image_model_loaded": _image_model is not None,
"cache_enabled": {
"text": _text_cache.redis_client is not None,
"image": _image_cache.redis_client is not None,
},
"limits": {
"text": _text_request_limiter.snapshot(),
"image": _image_request_limiter.snapshot(),
},
"stats": {
"text": _text_stats.snapshot(),
"image": _image_stats.snapshot(),
},
"text_microbatch": {
"window_ms": round(_TEXT_MICROBATCH_WINDOW_SEC * 1000.0, 3),
"queue_depth": len(_text_single_queue),
"worker_alive": bool(_text_batch_worker is not None and _text_batch_worker.is_alive()),
"request_timeout_sec": _TEXT_REQUEST_TIMEOUT_SEC,
},
}


@app.get("/ready")
def ready() -> Dict[str, Any]:
text_ready = (not open_text_model) or (_text_model is not None)
image_ready = (not open_image_model) or (_image_model is not None)
if not (text_ready and image_ready):
raise HTTPException(
status_code=503,
detail={
"service_kind": _SERVICE_KIND,
"text_ready": text_ready,
"image_ready": image_ready,
},
)
return {
"status": "ready",
"service_kind": _SERVICE_KIND,
"text_ready": text_ready,
"image_ready": image_ready,
}


def _embed_text_impl(
normalized: List[str],
effective_normalize: bool,
request_id: str,
) -> _EmbedResult:
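    """Serve per-text cache hits, encode only the misses with the configured backend, then backfill the cache."""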
if _text_model is None:
raise RuntimeError("Text model not loaded")
out: List[Optional[List[float]]] = [None] * len(normalized)
missing_indices: List[int] = []
missing_texts: List[str] = []
missing_cache_keys: List[str] = []
cache_hits = 0
for idx, text in enumerate(normalized):
cache_key = build_text_cache_key(text, normalize=effective_normalize)
cached = _text_cache.get(cache_key)
if cached is not None:
vec = _as_list(cached, normalize=False)
if vec is not None:
out[idx] = vec
cache_hits += 1
continue
missing_indices.append(idx)
missing_texts.append(text)
missing_cache_keys.append(cache_key)
if not missing_texts:
logger.info(
"text backend done | backend=%s mode=cache-only inputs=%d normalize=%s dim=%d cache_hits=%d cache_misses=0 backend_elapsed_ms=0.00",
_text_backend_name,
len(normalized),
effective_normalize,
len(out[0]) if out and out[0] is not None else 0,
cache_hits,
extra=_request_log_extra(request_id),
)
return _EmbedResult(
vectors=out,
cache_hits=cache_hits,
cache_misses=0,
backend_elapsed_ms=0.0,
mode="cache-only",
)
backend_t0 = time.perf_counter()
try:
if _text_backend_name == "local_st":
if len(missing_texts) == 1 and _text_batch_worker is not None:
computed = [
_encode_single_text_with_microbatch(
missing_texts[0],
normalize=effective_normalize,
request_id=request_id,
)
]
mode = "microbatch-single"
else:
embs = _encode_local_st(missing_texts, normalize_embeddings=False)
computed = []
for i, emb in enumerate(embs):
vec = _as_list(emb, normalize=effective_normalize)
if vec is None:
raise RuntimeError(f"Text model returned empty embedding for missing index {i}")
computed.append(vec)
mode = "direct-batch"
else:
embs = _text_model.encode(
missing_texts,
batch_size=int(CONFIG.TEXT_BATCH_SIZE),
device=CONFIG.TEXT_DEVICE,
normalize_embeddings=effective_normalize,
)
computed = []
for i, emb in enumerate(embs):
vec = _as_list(emb, normalize=False)
if vec is None:
raise RuntimeError(f"Text model returned empty embedding for missing index {i}")
computed.append(vec)
mode = "backend-batch"
except Exception as e:
logger.error(
"Text embedding backend failure: %s",
e,
exc_info=True,
extra=_request_log_extra(request_id),
)
raise RuntimeError(f"Text embedding backend failure: {e}") from e
if len(computed) != len(missing_texts):
raise RuntimeError(
f"Text model response length mismatch: expected {len(missing_texts)}, "
f"got {len(computed)}"
)
for pos, cache_key, vec in zip(missing_indices, missing_cache_keys, computed):
out[pos] = vec
_text_cache.set(cache_key, np.asarray(vec, dtype=np.float32))
backend_elapsed_ms = (time.perf_counter() - backend_t0) * 1000.0
logger.info(
"text backend done | backend=%s mode=%s inputs=%d normalize=%s dim=%d cache_hits=%d cache_misses=%d backend_elapsed_ms=%.2f",
_text_backend_name,
mode,
len(normalized),
effective_normalize,
len(out[0]) if out and out[0] is not None else 0,
cache_hits,
len(missing_texts),
backend_elapsed_ms,
extra=_request_log_extra(request_id),
)
return _EmbedResult(
vectors=out,
cache_hits=cache_hits,
cache_misses=len(missing_texts),
backend_elapsed_ms=backend_elapsed_ms,
mode=mode,
)


@app.post("/embed/text")
async def embed_text(
texts: List[str],
http_request: Request,
response: Response,
normalize: Optional[bool] = None,
) -> List[Optional[List[float]]]:
if _text_model is None:
raise HTTPException(status_code=503, detail="Text embedding model not loaded in this service")
request_id = _resolve_request_id(http_request)
response.headers["X-Request-ID"] = request_id
effective_normalize = bool(CONFIG.TEXT_NORMALIZE_EMBEDDINGS) if normalize is None else bool(normalize)
normalized: List[str] = []
for i, t in enumerate(texts):
if not isinstance(t, str):
raise HTTPException(status_code=400, detail=f"Invalid text at index {i}: must be string")
s = t.strip()
if not s:
raise HTTPException(status_code=400, detail=f"Invalid text at index {i}: empty string")
normalized.append(s)
cache_check_started = time.perf_counter()
cache_only = _try_full_text_cache_hit(normalized, effective_normalize)
if cache_only is not None:
latency_ms = (time.perf_counter() - cache_check_started) * 1000.0
_text_stats.record_completed(
success=True,
latency_ms=latency_ms,
backend_latency_ms=0.0,
cache_hits=cache_only.cache_hits,
cache_misses=0,
)
logger.info(
"embed_text response | backend=%s mode=cache-only inputs=%d normalize=%s dim=%d cache_hits=%d cache_misses=0 first_vector=%s latency_ms=%.2f",
_text_backend_name,
len(normalized),
effective_normalize,
len(cache_only.vectors[0]) if cache_only.vectors and cache_only.vectors[0] is not None else 0,
cache_only.cache_hits,
_preview_vector(cache_only.vectors[0] if cache_only.vectors else None),
latency_ms,
extra=_request_log_extra(request_id),
)
return cache_only.vectors
accepted, active = _text_request_limiter.try_acquire()
if not accepted:
_text_stats.record_rejected()
logger.warning(
"embed_text rejected | client=%s backend=%s inputs=%d normalize=%s active=%d limit=%d preview=%s",
_request_client(http_request),
_text_backend_name,
len(normalized),
effective_normalize,
active,
_TEXT_MAX_INFLIGHT,
_preview_inputs(normalized, _LOG_PREVIEW_COUNT, _LOG_TEXT_PREVIEW_CHARS),
extra=_request_log_extra(request_id),
)
raise HTTPException(
status_code=_OVERLOAD_STATUS_CODE,
detail=f"Text embedding service busy: active={active}, limit={_TEXT_MAX_INFLIGHT}",
)
request_started = time.perf_counter()
success = False
backend_elapsed_ms = 0.0
cache_hits = 0
cache_misses = 0
try:
logger.info(
"embed_text request | client=%s backend=%s inputs=%d normalize=%s active=%d limit=%d preview=%s",
_request_client(http_request),
_text_backend_name,
len(normalized),
effective_normalize,
active,
_TEXT_MAX_INFLIGHT,
_preview_inputs(normalized, _LOG_PREVIEW_COUNT, _LOG_TEXT_PREVIEW_CHARS),
extra=_request_log_extra(request_id),
)
verbose_logger.info(
"embed_text detail | payload=%s normalize=%s backend=%s",
normalized,
effective_normalize,
_text_backend_name,
extra=_request_log_extra(request_id),
)
result = await run_in_threadpool(_embed_text_impl, normalized, effective_normalize, request_id)
success = True
backend_elapsed_ms = result.backend_elapsed_ms
cache_hits = result.cache_hits
cache_misses = result.cache_misses
latency_ms = (time.perf_counter() - request_started) * 1000.0
_text_stats.record_completed(
success=True,
latency_ms=latency_ms,
backend_latency_ms=backend_elapsed_ms,
cache_hits=cache_hits,
cache_misses=cache_misses,
)
logger.info(
"embed_text response | backend=%s mode=%s inputs=%d normalize=%s dim=%d cache_hits=%d cache_misses=%d first_vector=%s latency_ms=%.2f",
_text_backend_name,
result.mode,
len(normalized),
effective_normalize,
len(result.vectors[0]) if result.vectors and result.vectors[0] is not None else 0,
cache_hits,
cache_misses,
_preview_vector(result.vectors[0] if result.vectors else None),
latency_ms,
extra=_request_log_extra(request_id),
)
verbose_logger.info(
"embed_text result detail | count=%d first_vector=%s latency_ms=%.2f",
len(result.vectors),
result.vectors[0][: _VECTOR_PREVIEW_DIMS]
if result.vectors and result.vectors[0] is not None
else [],
latency_ms,
extra=_request_log_extra(request_id),
)
return result.vectors
except HTTPException:
raise
except Exception as e:
latency_ms = (time.perf_counter() - request_started) * 1000.0
_text_stats.record_completed(
success=False,
latency_ms=latency_ms,
backend_latency_ms=backend_elapsed_ms,
cache_hits=cache_hits,
cache_misses=cache_misses,
)
logger.error(
"embed_text failed | backend=%s inputs=%d normalize=%s latency_ms=%.2f error=%s",
_text_backend_name,
len(normalized),
effective_normalize,
latency_ms,
e,
exc_info=True,
extra=_request_log_extra(request_id),
)
raise HTTPException(status_code=502, detail=str(e)) from e
finally:
remaining = _text_request_limiter.release(success=success)
logger.info(
"embed_text finalize | success=%s active_after=%d",
success,
remaining,
extra=_request_log_extra(request_id),
)


def _embed_image_impl(
urls: List[str],
effective_normalize: bool,
request_id: str,
) -> _EmbedResult:
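    """Serve per-URL cache hits, encode only the misses with the image model, then backfill the cache."""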
if _image_model is None:
raise RuntimeError("Image model not loaded")
out: List[Optional[List[float]]] = [None] * len(urls)
missing_indices: List[int] = []
missing_urls: List[str] = []
missing_cache_keys: List[str] = []
cache_hits = 0
for idx, url in enumerate(urls):
cache_key = build_image_cache_key(url, normalize=effective_normalize)
cached = _image_cache.get(cache_key)
if cached is not None:
vec = _as_list(cached, normalize=False)
if vec is not None:
out[idx] = vec
cache_hits += 1
continue
missing_indices.append(idx)
missing_urls.append(url)
missing_cache_keys.append(cache_key)
if not missing_urls:
logger.info(
"image backend done | mode=cache-only inputs=%d normalize=%s dim=%d cache_hits=%d cache_misses=0 backend_elapsed_ms=0.00",
len(urls),
effective_normalize,
len(out[0]) if out and out[0] is not None else 0,
cache_hits,
extra=_request_log_extra(request_id),
)
return _EmbedResult(
vectors=out,
cache_hits=cache_hits,
cache_misses=0,
backend_elapsed_ms=0.0,
mode="cache-only",
)
backend_t0 = time.perf_counter()
with _image_encode_lock:
vectors = _image_model.encode_image_urls(
missing_urls,
batch_size=CONFIG.IMAGE_BATCH_SIZE,
normalize_embeddings=effective_normalize,
)
if vectors is None or len(vectors) != len(missing_urls):
raise RuntimeError(
f"Image model response length mismatch: expected {len(missing_urls)}, "
f"got {0 if vectors is None else len(vectors)}"
)
for pos, cache_key, vec in zip(missing_indices, missing_cache_keys, vectors):
out_vec = _as_list(vec, normalize=effective_normalize)
if out_vec is None:
raise RuntimeError(f"Image model returned empty embedding for position {pos}")
out[pos] = out_vec
_image_cache.set(cache_key, np.asarray(out_vec, dtype=np.float32))
backend_elapsed_ms = (time.perf_counter() - backend_t0) * 1000.0
logger.info(
"image backend done | mode=backend-batch inputs=%d normalize=%s dim=%d cache_hits=%d cache_misses=%d backend_elapsed_ms=%.2f",
len(urls),
effective_normalize,
len(out[0]) if out and out[0] is not None else 0,
cache_hits,
len(missing_urls),
backend_elapsed_ms,
extra=_request_log_extra(request_id),
)
return _EmbedResult(
vectors=out,
cache_hits=cache_hits,
cache_misses=len(missing_urls),
backend_elapsed_ms=backend_elapsed_ms,
mode="backend-batch",
)


@app.post("/embed/image")
async def embed_image(
images: List[str],
http_request: Request,
response: Response,
normalize: Optional[bool] = None,
) -> List[Optional[List[float]]]:
if _image_model is None:
raise HTTPException(status_code=503, detail="Image embedding model not loaded in this service")
request_id = _resolve_request_id(http_request)
response.headers["X-Request-ID"] = request_id
effective_normalize = bool(CONFIG.IMAGE_NORMALIZE_EMBEDDINGS) if normalize is None else bool(normalize)
urls: List[str] = []
for i, url_or_path in enumerate(images):
if not isinstance(url_or_path, str):
raise HTTPException(status_code=400, detail=f"Invalid image at index {i}: must be string URL/path")
s = url_or_path.strip()
if not s:
raise HTTPException(status_code=400, detail=f"Invalid image at index {i}: empty URL/path")
urls.append(s)
cache_check_started = time.perf_counter()
cache_only = _try_full_image_cache_hit(urls, effective_normalize)
if cache_only is not None:
latency_ms = (time.perf_counter() - cache_check_started) * 1000.0
_image_stats.record_completed(
success=True,
latency_ms=latency_ms,
backend_latency_ms=0.0,
cache_hits=cache_only.cache_hits,
cache_misses=0,
)
logger.info(
"embed_image response | mode=cache-only inputs=%d normalize=%s dim=%d cache_hits=%d cache_misses=0 first_vector=%s latency_ms=%.2f",
len(urls),
effective_normalize,
len(cache_only.vectors[0]) if cache_only.vectors and cache_only.vectors[0] is not None else 0,
cache_only.cache_hits,
_preview_vector(cache_only.vectors[0] if cache_only.vectors else None),
latency_ms,
extra=_request_log_extra(request_id),
)
return cache_only.vectors
accepted, active = _image_request_limiter.try_acquire()
if not accepted:
_image_stats.record_rejected()
logger.warning(
"embed_image rejected | client=%s inputs=%d normalize=%s active=%d limit=%d preview=%s",
_request_client(http_request),
len(urls),
effective_normalize,
active,
_IMAGE_MAX_INFLIGHT,
_preview_inputs(urls, _LOG_PREVIEW_COUNT, _LOG_IMAGE_PREVIEW_CHARS),
extra=_request_log_extra(request_id),
)
raise HTTPException(
status_code=_OVERLOAD_STATUS_CODE,
detail=f"Image embedding service busy: active={active}, limit={_IMAGE_MAX_INFLIGHT}",
)
request_started = time.perf_counter()
success = False
backend_elapsed_ms = 0.0
cache_hits = 0
cache_misses = 0
try:
logger.info(
"embed_image request | client=%s inputs=%d normalize=%s active=%d limit=%d preview=%s",
_request_client(http_request),
len(urls),
effective_normalize,
active,
_IMAGE_MAX_INFLIGHT,
_preview_inputs(urls, _LOG_PREVIEW_COUNT, _LOG_IMAGE_PREVIEW_CHARS),
extra=_request_log_extra(request_id),
)
verbose_logger.info(
"embed_image detail | payload=%s normalize=%s",
urls,
effective_normalize,
extra=_request_log_extra(request_id),
)
result = await run_in_threadpool(_embed_image_impl, urls, effective_normalize, request_id)
success = True
backend_elapsed_ms = result.backend_elapsed_ms
cache_hits = result.cache_hits
cache_misses = result.cache_misses
latency_ms = (time.perf_counter() - request_started) * 1000.0
_image_stats.record_completed(
success=True,
latency_ms=latency_ms,
backend_latency_ms=backend_elapsed_ms,
cache_hits=cache_hits,
cache_misses=cache_misses,
)
logger.info(
"embed_image response | mode=%s inputs=%d normalize=%s dim=%d cache_hits=%d cache_misses=%d first_vector=%s latency_ms=%.2f",
result.mode,
len(urls),
effective_normalize,
len(result.vectors[0]) if result.vectors and result.vectors[0] is not None else 0,
cache_hits,
cache_misses,
_preview_vector(result.vectors[0] if result.vectors else None),
latency_ms,
extra=_request_log_extra(request_id),
)
verbose_logger.info(
"embed_image result detail | count=%d first_vector=%s latency_ms=%.2f",
len(result.vectors),
result.vectors[0][: _VECTOR_PREVIEW_DIMS]
if result.vectors and result.vectors[0] is not None
else [],
latency_ms,
extra=_request_log_extra(request_id),
)
return result.vectors
except HTTPException:
raise
except Exception as e:
latency_ms = (time.perf_counter() - request_started) * 1000.0
_image_stats.record_completed(
success=False,
latency_ms=latency_ms,
backend_latency_ms=backend_elapsed_ms,
cache_hits=cache_hits,
cache_misses=cache_misses,
)
logger.error(
"embed_image failed | inputs=%d normalize=%s latency_ms=%.2f error=%s",
len(urls),
effective_normalize,
latency_ms,
e,
exc_info=True,
extra=_request_log_extra(request_id),
)
raise HTTPException(status_code=502, detail=f"Image embedding backend failure: {e}") from e
finally:
remaining = _image_request_limiter.release(success=success)
logger.info(
"embed_image finalize | success=%s active_after=%d",
success,
remaining,
extra=_request_log_extra(request_id),
)
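

# Illustrative local entry point: the actual deployment command (ASGI server,
# host, port, worker count) is an assumption and may differ.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)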