"""
Embedding service (FastAPI).

API (simple list-in, list-out; aligned by index):
- POST /embed/text   body: ["text1", "text2", ...] -> [[...], ...]
- POST /embed/image  body: ["url_or_path1", ...]  -> [[...], ...]
"""

import logging
import os
import threading
import time
from collections import deque
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

import numpy as np
from fastapi import FastAPI, HTTPException

from embeddings.config import CONFIG
from embeddings.protocols import ImageEncoderProtocol
from config.services_config import get_embedding_backend_config

logger = logging.getLogger(__name__)

app = FastAPI(title="saas-search Embedding Service", version="1.0.0")

# Models are loaded at startup, not lazily
_text_model: Optional[Any] = None
_image_model: Optional[ImageEncoderProtocol] = None
_text_backend_name: str = ""
_enable_text_model = os.getenv("EMBEDDING_ENABLE_TEXT_MODEL", "true").lower() in ("1", "true", "yes")
_enable_image_model = os.getenv("EMBEDDING_ENABLE_IMAGE_MODEL", "true").lower() in ("1", "true", "yes")

_text_encode_lock = threading.Lock()
_image_encode_lock = threading.Lock()


@dataclass
class _SingleTextTask:
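    """A single queued text request awaiting the micro-batch worker."""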
    text: str
    normalize: bool
    created_at: float
    done: threading.Event
    result: Optional[List[float]] = None
    error: Optional[Exception] = None


_text_single_queue: "deque[_SingleTextTask]" = deque()
_text_single_queue_cv = threading.Condition()
_text_batch_worker: Optional[threading.Thread] = None
_text_batch_worker_stop = False
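# Single-text requests arriving within this window are coalesced into one
# model call (env TEXT_MICROBATCH_WINDOW_MS, default 4 ms).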
_TEXT_MICROBATCH_WINDOW_SEC = max(
    0.0, float(os.getenv("TEXT_MICROBATCH_WINDOW_MS", "4")) / 1000.0
)
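# Upper bound on how long a caller blocks waiting for the worker's result.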
_TEXT_REQUEST_TIMEOUT_SEC = max(
    1.0, float(os.getenv("TEXT_REQUEST_TIMEOUT_SEC", "30"))
)


def _encode_local_st(texts: List[str], normalize_embeddings: bool) -> Any:
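    """Run one encode call on the in-process text model, serialized by a lock
    so concurrent request threads never hit the model simultaneously."""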
    with _text_encode_lock:
        return _text_model.encode_batch(
            texts,
            batch_size=int(CONFIG.TEXT_BATCH_SIZE),
            device=CONFIG.TEXT_DEVICE,
            normalize_embeddings=normalize_embeddings,
        )


def _start_text_batch_worker() -> None:
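    """Start the daemon thread that micro-batches single-text requests
    (no-op if it is already running)."""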
    global _text_batch_worker, _text_batch_worker_stop
    if _text_batch_worker is not None and _text_batch_worker.is_alive():
        return
    _text_batch_worker_stop = False
    _text_batch_worker = threading.Thread(
        target=_text_batch_worker_loop,
        name="embed-text-microbatch-worker",
        daemon=True,
    )
    _text_batch_worker.start()
    logger.info(
        "Started local_st text micro-batch worker | window_ms=%.1f max_batch=%d",
        _TEXT_MICROBATCH_WINDOW_SEC * 1000.0,
        int(CONFIG.TEXT_BATCH_SIZE),
    )


def _stop_text_batch_worker() -> None:
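    """Ask the micro-batch worker to exit. Tasks still queued at that point
    are never completed and will hit their caller-side timeout; this is only
    expected during process shutdown."""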
    global _text_batch_worker_stop
    with _text_single_queue_cv:
        _text_batch_worker_stop = True
        _text_single_queue_cv.notify_all()


def _text_batch_worker_loop() -> None:
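    """Coalesce queued single-text tasks into batched encode calls.

    Block for the first task, then hold the batch open for up to
    _TEXT_MICROBATCH_WINDOW_SEC (or until CONFIG.TEXT_BATCH_SIZE tasks are
    gathered) so concurrent single-text requests share one model call.
    Embeddings are computed unnormalized; each task is normalized
    individually per its own flag in _as_list.
    """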
    max_batch = max(1, int(CONFIG.TEXT_BATCH_SIZE))
    while True:
        with _text_single_queue_cv:
            while not _text_single_queue and not _text_batch_worker_stop:
                _text_single_queue_cv.wait()
            if _text_batch_worker_stop:
                return

            batch: List[_SingleTextTask] = [_text_single_queue.popleft()]
            deadline = time.perf_counter() + _TEXT_MICROBATCH_WINDOW_SEC

            while len(batch) < max_batch:
                remaining = deadline - time.perf_counter()
                if remaining <= 0:
                    break
                if not _text_single_queue:
                    _text_single_queue_cv.wait(timeout=remaining)
                    continue
                while _text_single_queue and len(batch) < max_batch:
                    batch.append(_text_single_queue.popleft())

        try:
            embs = _encode_local_st([task.text for task in batch], normalize_embeddings=False)
            if embs is None or len(embs) != len(batch):
                raise RuntimeError(
                    f"Text model response length mismatch in micro-batch: "
                    f"expected {len(batch)}, got {0 if embs is None else len(embs)}"
                )
            for task, emb in zip(batch, embs):
                vec = _as_list(emb, normalize=task.normalize)
                if vec is None:
                    raise RuntimeError("Text model returned empty embedding in micro-batch")
                task.result = vec
        except Exception as exc:
            for task in batch:
                task.error = exc
        finally:
            for task in batch:
                task.done.set()


def _encode_single_text_with_microbatch(text: str, normalize: bool) -> List[float]:
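    """Enqueue one text for the micro-batch worker and block for its result.

    On timeout the task is removed from the queue if still pending; if the
    worker has already claimed it, any late result is discarded.
    """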
    task = _SingleTextTask(
        text=text,
        normalize=normalize,
        created_at=time.perf_counter(),
        done=threading.Event(),
    )
    with _text_single_queue_cv:
        _text_single_queue.append(task)
        _text_single_queue_cv.notify()

    if not task.done.wait(timeout=_TEXT_REQUEST_TIMEOUT_SEC):
        with _text_single_queue_cv:
            try:
                _text_single_queue.remove(task)
            except ValueError:
                # The worker already claimed the task; fall through and treat
                # it as timed out unless it finished in the meantime.
                pass
        if not task.done.is_set():
            raise RuntimeError(
                f"Timed out waiting for text micro-batch worker ({_TEXT_REQUEST_TIMEOUT_SEC:.1f}s)"
            )
    if task.error is not None:
        raise task.error
    if task.result is None:
        raise RuntimeError("Text micro-batch worker returned empty result")
    return task.result


@app.on_event("startup")
def load_models():
    """Load models at service startup to avoid first-request latency."""
    global _text_model, _image_model, _text_backend_name

    logger.info("Loading embedding models at startup...")

    # Load text model
    if _enable_text_model:
        try:
            backend_name, backend_cfg = get_embedding_backend_config()
            _text_backend_name = backend_name
            if backend_name == "tei":
                from embeddings.tei_model import TEITextModel

                base_url = (
                    os.getenv("TEI_BASE_URL")
                    or backend_cfg.get("base_url")
                    or CONFIG.TEI_BASE_URL
                )
                timeout_sec = int(
                    os.getenv("TEI_TIMEOUT_SEC")
                    or backend_cfg.get("timeout_sec")
                    or CONFIG.TEI_TIMEOUT_SEC
                )
                logger.info("Loading text backend: tei (base_url=%s)", base_url)
                _text_model = TEITextModel(
                    base_url=str(base_url),
                    timeout_sec=timeout_sec,
                )
            elif backend_name == "local_st":
                from embeddings.qwen3_model import Qwen3TextModel

                model_id = (
                    os.getenv("TEXT_MODEL_ID")
                    or backend_cfg.get("model_id")
                    or CONFIG.TEXT_MODEL_ID
                )
                logger.info("Loading text backend: local_st (model=%s)", model_id)
                _text_model = Qwen3TextModel(model_id=str(model_id))
                _start_text_batch_worker()
            else:
                raise ValueError(
                    f"Unsupported embedding backend: {backend_name}. "
                    "Supported: tei, local_st"
                )
            logger.info("Text backend loaded successfully: %s", _text_backend_name)
        except Exception as e:
            logger.error(f"Failed to load text model: {e}", exc_info=True)
            raise
    

    # Load image model: clip-as-service (recommended) or local CN-CLIP
    if _enable_image_model:
        try:
            if CONFIG.USE_CLIP_AS_SERVICE:
                from embeddings.clip_as_service_encoder import ClipAsServiceImageEncoder

                logger.info(f"Loading image encoder via clip-as-service: {CONFIG.CLIP_AS_SERVICE_SERVER}")
                _image_model = ClipAsServiceImageEncoder(
                    server=CONFIG.CLIP_AS_SERVICE_SERVER,
                    batch_size=CONFIG.IMAGE_BATCH_SIZE,
                )
                logger.info("Image model (clip-as-service) loaded successfully")
            else:
                from embeddings.clip_model import ClipImageModel

                logger.info(f"Loading local image model: {CONFIG.IMAGE_MODEL_NAME} (device: {CONFIG.IMAGE_DEVICE})")
                _image_model = ClipImageModel(
                    model_name=CONFIG.IMAGE_MODEL_NAME,
                    device=CONFIG.IMAGE_DEVICE,
                )
                logger.info("Image model (local CN-CLIP) loaded successfully")
        except Exception as e:
            logger.error("Failed to load image model: %s", e, exc_info=True)
            raise

    logger.info("All embedding models loaded successfully, service ready")


@app.on_event("shutdown")
def stop_workers() -> None:
    _stop_text_batch_worker()


def _normalize_vector(vec: np.ndarray) -> np.ndarray:
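    """Scale a vector to unit L2 norm, rejecting zero or non-finite norms."""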
    norm = float(np.linalg.norm(vec))
    if not np.isfinite(norm) or norm <= 0.0:
        raise RuntimeError("Embedding vector has invalid norm (must be > 0)")
    return vec / norm


def _as_list(embedding: Optional[np.ndarray], normalize: bool = False) -> Optional[List[float]]:
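    """Coerce an embedding into a plain float32 list, optionally normalized.
    Returns None only when the input itself is None."""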
    if embedding is None:
        return None
    if not isinstance(embedding, np.ndarray):
        embedding = np.array(embedding, dtype=np.float32)
    if embedding.ndim != 1:
        embedding = embedding.reshape(-1)
    embedding = embedding.astype(np.float32, copy=False)
    if normalize:
        embedding = _normalize_vector(embedding).astype(np.float32, copy=False)
    return embedding.tolist()


@app.get("/health")
def health() -> Dict[str, Any]:
    """Health check endpoint. Returns status and model loading state."""
    return {
        "status": "ok",
        "text_model_loaded": _text_model is not None,
        "text_backend": _text_backend_name,
        "image_model_loaded": _image_model is not None,
    }


@app.post("/embed/text")
def embed_text(texts: List[str], normalize: Optional[bool] = None) -> List[Optional[List[float]]]:
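    """Embed a batch of texts; embeddings are returned aligned by index.

    `normalize` overrides CONFIG.TEXT_NORMALIZE_EMBEDDINGS when provided. On
    the local_st backend, single-text requests go through the micro-batch
    worker so concurrent callers can share one model invocation.
    """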
    if _text_model is None:
        raise HTTPException(status_code=503, detail="Text model not loaded")
    effective_normalize = bool(CONFIG.TEXT_NORMALIZE_EMBEDDINGS) if normalize is None else bool(normalize)
    normalized: List[str] = []
    for i, t in enumerate(texts):
        if not isinstance(t, str):
            raise HTTPException(status_code=400, detail=f"Invalid text at index {i}: must be string")
        s = t.strip()
        if not s:
            raise HTTPException(status_code=400, detail=f"Invalid text at index {i}: empty string")
        normalized.append(s)

    t0 = time.perf_counter()
    try:
        # local_st backend uses in-process torch model, keep serialized encode for safety;
        # TEI backend is an HTTP client and supports concurrent requests.
        if _text_backend_name == "local_st":
            if len(normalized) == 1 and _text_batch_worker is not None:
                out = [_encode_single_text_with_microbatch(normalized[0], normalize=effective_normalize)]
                elapsed_ms = (time.perf_counter() - t0) * 1000.0
                logger.info(
                    "embed_text done | backend=%s mode=microbatch-single inputs=%d normalize=%s elapsed_ms=%.2f",
                    _text_backend_name,
                    len(normalized),
                    effective_normalize,
                    elapsed_ms,
                )
                return out
            embs = _encode_local_st(normalized, normalize_embeddings=False)
        else:
            embs = _text_model.encode_batch(
                normalized,
                batch_size=int(CONFIG.TEXT_BATCH_SIZE),
                device=CONFIG.TEXT_DEVICE,
                normalize_embeddings=effective_normalize,
            )
    except Exception as e:
        logger.error("Text embedding backend failure: %s", e, exc_info=True)
        raise HTTPException(
            status_code=502,
            detail=f"Text embedding backend failure: {e}",
        ) from e
    if embs is None or len(embs) != len(normalized):
        raise RuntimeError(
            f"Text model response length mismatch: expected {len(normalized)}, "
            f"got {0 if embs is None else len(embs)}"
        )
    out: List[Optional[List[float]]] = []
    for i, emb in enumerate(embs):
        vec = _as_list(emb, normalize=effective_normalize)
        if vec is None:
            raise RuntimeError(f"Text model returned empty embedding for index {i}")
        out.append(vec)
    elapsed_ms = (time.perf_counter() - t0) * 1000.0
    logger.info(
        "embed_text done | backend=%s inputs=%d normalize=%s elapsed_ms=%.2f",
        _text_backend_name,
        len(normalized),
        effective_normalize,
        elapsed_ms,
    )
    return out


@app.post("/embed/image")
def embed_image(images: List[str], normalize: Optional[bool] = None) -> List[Optional[List[float]]]:
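    """Embed a batch of image URLs/paths; embeddings are aligned by index.

    `normalize` overrides CONFIG.IMAGE_NORMALIZE_EMBEDDINGS when provided.
    """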
    if _image_model is None:
        raise HTTPException(status_code=503, detail="Image model not loaded")
    effective_normalize = bool(CONFIG.IMAGE_NORMALIZE_EMBEDDINGS) if normalize is None else bool(normalize)
    urls: List[str] = []
    for i, url_or_path in enumerate(images):
        if not isinstance(url_or_path, str):
            raise HTTPException(status_code=400, detail=f"Invalid image at index {i}: must be string URL/path")
        s = url_or_path.strip()
        if not s:
            raise HTTPException(status_code=400, detail=f"Invalid image at index {i}: empty URL/path")
        urls.append(s)

    # Serialize encodes for safety (the backend may be an in-process model),
    # and surface backend failures as 502, mirroring the text path.
    try:
        with _image_encode_lock:
            vectors = _image_model.encode_image_urls(
                urls,
                batch_size=CONFIG.IMAGE_BATCH_SIZE,
                normalize_embeddings=effective_normalize,
            )
    except Exception as e:
        logger.error("Image embedding backend failure: %s", e, exc_info=True)
        raise HTTPException(status_code=502, detail=f"Image embedding backend failure: {e}") from e
    if vectors is None or len(vectors) != len(urls):
        raise RuntimeError(
            f"Image model response length mismatch: expected {len(urls)}, "
            f"got {0 if vectors is None else len(vectors)}"
        )
    out: List[Optional[List[float]]] = []
    for i, vec in enumerate(vectors):
        out_vec = _as_list(vec, normalize=effective_normalize)
        if out_vec is None:
            raise RuntimeError(f"Image model returned empty embedding for index {i}")
        out.append(out_vec)
    return out
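

# Local dev entry point: a minimal sketch, assuming uvicorn is installed.
# The 0.0.0.0:8000 bind below is an assumption for illustration, not a
# project-mandated default.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)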