# service.py
"""Translation service orchestration."""

from __future__ import annotations

import logging
from typing import Dict, List, Optional

from config.services_config import TranslationServiceConfig, get_translation_config
from translation.protocols import TranslateInput, TranslateOutput, TranslationBackendProtocol

logger = logging.getLogger(__name__)


class TranslationService:
    """Owns translation backends and routes calls by model and scene."""

    def __init__(self, config: Optional[TranslationServiceConfig] = None) -> None:
        """Load configuration (or use the injected one) and build all enabled backends."""
        self.config = config if config is not None else get_translation_config()
        self._backends: Dict[str, TranslationBackendProtocol] = {}
        self._init_enabled_backends()

    def _init_enabled_backends(self) -> None:
        """Instantiate each enabled backend via its registered factory.

        Enabled names with no registered factory are logged and skipped;
        ending up with zero backends is treated as a configuration error.
        """
        factories = {
            "qwen-mt": self._create_qwen_mt_backend,
            "deepl": self._create_deepl_backend,
            "llm": self._create_llm_backend,
        }
        for model_name in self.config.enabled_models:
            build = factories.get(model_name)
            if build is not None:
                self._backends[model_name] = build()
            else:
                logger.warning("Translation backend '%s' is enabled but not registered", model_name)

        if not self._backends:
            raise ValueError("No enabled translation backends found in services.translation.capabilities")

    def _create_qwen_mt_backend(self) -> TranslationBackendProtocol:
        """Build the Qwen-MT backend from its capability config."""
        # Imported lazily so the dependency is only required when this backend is enabled.
        from translation.backends.qwen_mt import QwenMTTranslationBackend

        options = self.config.get_capability_cfg("qwen-mt")
        chosen_model = options.get("model") or "qwen-mt-flash"
        return QwenMTTranslationBackend(
            model=chosen_model,
            api_key=options.get("api_key"),
            use_cache=bool(options.get("use_cache", True)),
            timeout=int(options.get("timeout_sec", 10)),
            glossary_id=options.get("glossary_id"),
            translation_context=options.get("translation_context"),
        )

    def _create_deepl_backend(self) -> TranslationBackendProtocol:
        """Build the DeepL backend from its capability config."""
        # Lazy import: only pay for the dependency when DeepL is enabled.
        from translation.backends.deepl import DeepLTranslationBackend

        options = self.config.get_capability_cfg("deepl")
        return DeepLTranslationBackend(
            api_key=options.get("api_key"),
            timeout=float(options.get("timeout_sec", 10.0)),
            glossary_id=options.get("glossary_id"),
        )

    def _create_llm_backend(self) -> TranslationBackendProtocol:
        """Build the generic LLM backend from its capability config."""
        # Lazy import keeps optional backends optional.
        from translation.backends.llm import LLMTranslationBackend

        options = self.config.get_capability_cfg("llm")
        return LLMTranslationBackend(
            model=options.get("model"),
            timeout_sec=float(options.get("timeout_sec", 30.0)),
            base_url=options.get("base_url"),
        )

    @property
    def available_models(self) -> List[str]:
        """Names of the backends that were successfully initialized."""
        return list(self._backends)

    def get_backend(self, model: Optional[str] = None) -> TranslationBackendProtocol:
        """Resolve *model* (aliases included) to an initialized backend.

        Raises:
            ValueError: if the normalized model is not among the enabled backends.
        """
        normalized = self.config.normalize_model_name(model)
        if normalized in self._backends:
            return self._backends[normalized]
        listing = ", ".join(self.available_models) or "none"
        raise ValueError(
            f"Translation model '{normalized}' is not enabled. "
            f"Available models: {listing}"
        )

    def translate(
        self,
        text: TranslateInput,
        target_lang: str,
        source_lang: Optional[str] = None,
        *,
        model: Optional[str] = None,
        scene: Optional[str] = None,
        prompt: Optional[str] = None,
    ) -> TranslateOutput:
        """Translate *text* into *target_lang* with the selected backend.

        The scene falls back to the configured default and finally to
        "general" when the requested scene is empty or whitespace-only.
        """
        backend = self.get_backend(model)
        requested_scene = scene or self.config.default_scene or "general"
        active_scene = requested_scene.strip() or "general"
        return backend.translate(
            text=text,
            target_lang=target_lang,
            source_lang=source_lang,
            context=active_scene,
            prompt=prompt,
        )