"""Tests for the local translation backends: language resolution, model-source
fallback, service preloading, token budgeting, and text segmentation."""

import logging

import pytest
import torch

from translation.backends.local_seq2seq import MarianMTTranslationBackend, NLLBTranslationBackend
from translation.backends.local_ctranslate2 import NLLBCTranslate2TranslationBackend
from translation.languages import build_nllb_language_catalog, resolve_nllb_language_code
from translation.service import TranslationService
from translation.text_splitter import compute_safe_input_token_limit, split_text_for_translation


class _FakeBatch(dict):
    """Dict-based stand-in for a tokenizer batch; records the device passed to .to()."""

    def to(self, device):
        self["device"] = device
        return self


class _FakeTokenizer:
    """Minimal HF-style tokenizer stand-in for the seq2seq backends."""

    def __init__(self):
        self.src_lang = None
        self.pad_token = ""
        self.eos_token = ""
        self.lang_code_to_id = {"eng_Latn": 101, "zho_Hans": 202}
        self.last_call = None

    def __call__(self, texts, **kwargs):
        self.last_call = {"texts": list(texts), **kwargs}
        return _FakeBatch({"input_ids": torch.tensor([[1, 2, 3]])})

    def batch_decode(self, generated, skip_special_tokens=True):
        del generated, skip_special_tokens
        return ["translated" for _ in self.last_call["texts"]]

    def convert_tokens_to_ids(self, token):
        return self.lang_code_to_id[token]


class _FakeModel:
    """Seq2seq model stand-in that records the kwargs passed to generate()."""

    def to(self, device):
        self.device = device
        return self

    def eval(self):
        return self

    def generate(self, **kwargs):
        self.last_generate_kwargs = kwargs
        return [[42]]


class _FakeCT2Tokenizer:
    """Tokenizer stand-in for the CTranslate2 backend."""

    def __init__(self, src_lang=None):
        self.src_lang = src_lang
        self.pad_token = ""
        self.eos_token = ""
        self.last_call = None

    def __call__(self, texts, **kwargs):
        self.last_call = {"texts": list(texts), **kwargs}
        return {"input_ids": [[1, 2, 3] for _ in texts]}

    def convert_ids_to_tokens(self, ids):
        del ids
        return ["tok_a", "tok_b", "tok_c"]

    def convert_tokens_to_ids(self, tokens):
        if isinstance(tokens, list):
            return [1 for _ in tokens]
        return 1

    def decode(self, token_ids, skip_special_tokens=True):
        del token_ids, skip_special_tokens
        return "translated"


class _FakeCT2Result:
    def __init__(self, tokens):
        self.hypotheses = [tokens]


class _FakeCT2Translator:
    """CTranslate2 translator stand-in that echoes the target prefix back."""

    def __init__(self):
        self.last_translate_batch_kwargs = None

    def translate_batch(self, source_tokens, **kwargs):
        self.last_translate_batch_kwargs = {"source_tokens": source_tokens, **kwargs}
        target_prefix = kwargs.get("target_prefix") or []
        return [
            # Guard against a short or missing prefix list instead of raising IndexError.
            _FakeCT2Result(
                ((target_prefix[idx] or []) if idx < len(target_prefix) else [])
                + ["translated_token"]
            )
            for idx, _ in enumerate(source_tokens)
        ]


def _stub_load_model(self):
    """Replacement for the seq2seq backends' _load_model that injects fakes."""
    self.tokenizer = _FakeTokenizer()
    self.seq2seq_model = _FakeModel()


def _stub_load_ct2_runtime(self):
    """Replacement for the CT2 backend's _load_runtime that injects fakes."""
    self.tokenizer = _FakeCT2Tokenizer()
    self.translator = _FakeCT2Translator()


def test_marian_language_validation(monkeypatch):
    monkeypatch.setattr(MarianMTTranslationBackend, "_load_model", _stub_load_model)
    backend = MarianMTTranslationBackend(
        name="opus-mt-zh-en",
        model_id="Helsinki-NLP/opus-mt-zh-en",
        model_dir="./models/translation/Helsinki-NLP/opus-mt-zh-en",
        device="cpu",
        torch_dtype="float32",
        batch_size=1,
        max_input_length=16,
        max_new_tokens=16,
        num_beams=1,
        source_langs=["zh"],
        target_langs=["en"],
    )

    result = backend.translate("测试", source_lang="zh", target_lang="en")
    assert result == "translated"

    with pytest.raises(ValueError, match="source languages"):
        backend.translate("test", source_lang="en", target_lang="zh")


def test_nllb_uses_src_lang_and_forced_bos(monkeypatch):
    monkeypatch.setattr(NLLBTranslationBackend, "_load_model", _stub_load_model)
    backend = NLLBTranslationBackend(
        name="nllb-200-distilled-600m",
        model_id="facebook/nllb-200-distilled-600M",
        model_dir="./models/translation/facebook/nllb-200-distilled-600M",
        device="cpu",
        torch_dtype="float32",
        batch_size=1,
        max_input_length=16,
        max_new_tokens=16,
        num_beams=1,
    )

    result = backend.translate("test", source_lang="en", target_lang="zh")

    assert result == "translated"
    assert backend.tokenizer.src_lang == "eng_Latn"
    assert backend.seq2seq_model.last_generate_kwargs["forced_bos_token_id"] == 202
def test_nllb_accepts_finnish_short_code(monkeypatch):
    monkeypatch.setattr(NLLBTranslationBackend, "_load_model", _stub_load_model)
    backend = NLLBTranslationBackend(
        name="nllb-200-distilled-600m",
        model_id="facebook/nllb-200-distilled-600M",
        model_dir="./models/translation/facebook/nllb-200-distilled-600M",
        device="cpu",
        torch_dtype="float32",
        batch_size=1,
        max_input_length=16,
        max_new_tokens=16,
        num_beams=1,
    )

    result = backend.translate("test", source_lang="fi", target_lang="zh")

    assert result == "translated"
    assert backend.tokenizer.src_lang == "fin_Latn"
    assert backend.seq2seq_model.last_generate_kwargs["forced_bos_token_id"] == 202


def test_nllb_ctranslate2_accepts_finnish_short_code(monkeypatch):
    created_tokenizers = []

    def _fake_from_pretrained(source, src_lang=None, **kwargs):
        del source, kwargs
        tokenizer = _FakeCT2Tokenizer(src_lang=src_lang)
        created_tokenizers.append(tokenizer)
        return tokenizer

    monkeypatch.setattr(NLLBCTranslate2TranslationBackend, "_load_runtime", _stub_load_ct2_runtime)
    monkeypatch.setattr(
        "translation.backends.local_ctranslate2.AutoTokenizer.from_pretrained",
        _fake_from_pretrained,
    )
    backend = NLLBCTranslate2TranslationBackend(
        name="nllb-200-distilled-600m",
        model_id="facebook/nllb-200-distilled-600M",
        model_dir="./models/translation/facebook/nllb-200-distilled-600M",
        device="cpu",
        torch_dtype="float32",
        batch_size=1,
        max_input_length=16,
        max_new_tokens=16,
        num_beams=1,
    )

    result = backend.translate("test", source_lang="fi", target_lang="zh")

    assert result == "translated"
    assert len(created_tokenizers) == 1
    assert created_tokenizers[0].src_lang == "fin_Latn"
    assert backend.translator.last_translate_batch_kwargs["target_prefix"] == [["zho_Hans"]]


def test_nllb_ctranslate2_falls_back_to_model_id_when_local_dir_is_wrong_type(tmp_path, monkeypatch):
    wrong_dir = tmp_path / "wrong-nllb"
    wrong_dir.mkdir()
    (wrong_dir / "config.json").write_text('{"model_type":"led"}', encoding="utf-8")

    monkeypatch.setattr(NLLBCTranslate2TranslationBackend, "_load_runtime", _stub_load_ct2_runtime)
    backend = NLLBCTranslate2TranslationBackend(
        name="nllb-200-distilled-600m",
        model_id="facebook/nllb-200-distilled-600M",
        model_dir=str(wrong_dir),
        device="cpu",
        torch_dtype="float32",
        batch_size=1,
        max_input_length=16,
        max_new_tokens=16,
        num_beams=1,
    )

    assert backend._model_source() == "facebook/nllb-200-distilled-600M"
    assert backend._tokenizer_source() == "facebook/nllb-200-distilled-600M"


def test_nllb_ctranslate2_falls_back_to_model_id_when_local_dir_is_incomplete(tmp_path, monkeypatch):
    incomplete_dir = tmp_path / "incomplete-nllb"
    incomplete_dir.mkdir()
    (incomplete_dir / "ctranslate2-float16").mkdir()

    monkeypatch.setattr(NLLBCTranslate2TranslationBackend, "_load_runtime", _stub_load_ct2_runtime)
    backend = NLLBCTranslate2TranslationBackend(
        name="nllb-200-distilled-600m",
        model_id="facebook/nllb-200-distilled-600M",
        model_dir=str(incomplete_dir),
        device="cpu",
        torch_dtype="float32",
        batch_size=1,
        max_input_length=16,
        max_new_tokens=16,
        num_beams=1,
    )

    assert backend._model_source() == "facebook/nllb-200-distilled-600M"
resolve_nllb_language_code("da", cat) == "dan_Latn" assert resolve_nllb_language_code("eu", cat) == "eus_Latn" assert resolve_nllb_language_code("gl", cat) == "glg_Latn" assert resolve_nllb_language_code("hu", cat) == "hun_Latn" assert resolve_nllb_language_code("id", cat) == "ind_Latn" assert resolve_nllb_language_code("nl", cat) == "nld_Latn" assert resolve_nllb_language_code("no", cat) == "nob_Latn" assert resolve_nllb_language_code("ro", cat) == "ron_Latn" assert resolve_nllb_language_code("SV", cat) == "swe_Latn" assert resolve_nllb_language_code("tr", cat) == "tur_Latn" assert resolve_nllb_language_code("deu_Latn", cat) == "deu_Latn" def test_translation_service_preloads_enabled_backends(monkeypatch): created = [] def _fake_create_backend(self, *, name, backend_type, cfg): del self, cfg created.append((name, backend_type)) class _Backend: model = name @property def supports_batch(self): return True def translate(self, text, target_lang, source_lang=None, scene=None): del target_lang, source_lang, scene return text return _Backend() monkeypatch.setattr(TranslationService, "_create_backend", _fake_create_backend) config = { "service_url": "http://127.0.0.1:6006", "timeout_sec": 10.0, "default_model": "opus-mt-en-zh", "default_scene": "general", "capabilities": { "opus-mt-en-zh": { "enabled": True, "backend": "local_marian", "use_cache": True, "model_id": "dummy", "model_dir": "dummy", "device": "cpu", "torch_dtype": "float32", "batch_size": 1, "max_input_length": 8, "max_new_tokens": 8, "num_beams": 1, }, "nllb-200-distilled-600m": { "enabled": True, "backend": "local_nllb", "use_cache": True, "model_id": "dummy", "model_dir": "dummy", "device": "cpu", "torch_dtype": "float32", "batch_size": 1, "max_input_length": 8, "max_new_tokens": 8, "num_beams": 1, }, }, "cache": { "ttl_seconds": 60, "sliding_expiration": True, }, } service = TranslationService(config) assert service.available_models == ["opus-mt-en-zh", "nllb-200-distilled-600m"] assert service.loaded_models == ["opus-mt-en-zh", "nllb-200-distilled-600m"] assert created == [ ("opus-mt-en-zh", "local_marian"), ("nllb-200-distilled-600m", "local_nllb"), ] backend = service.get_backend("opus-mt-en-zh") assert backend.model == "opus-mt-en-zh" def test_compute_safe_input_token_limit_uses_decode_constraints(): nllb_limit = compute_safe_input_token_limit( max_input_length=256, max_new_tokens=64, decoding_length_mode="source", decoding_length_extra=8, ) opus_limit = compute_safe_input_token_limit( max_input_length=256, max_new_tokens=256, ) assert nllb_limit == 56 assert opus_limit == 248 def test_split_text_for_translation_prefers_sentence_boundaries(): text = ( "这是一条很长的中文商品描述,包含材质、尺码和适用场景。" "适合春夏通勤,也适合日常出街穿搭;" "如果长度超了,应该优先按完整语义分句,而不是切成很碎的小片段。" ) segments = split_text_for_translation( text, max_tokens=36, token_length_fn=len, ) assert len(segments) >= 2 assert "".join(segments) == text assert all(len(segment) <= 36 for segment in segments) assert segments[0].endswith(("。", ";")) class _SegmentingMarianBackend(MarianMTTranslationBackend): def _load_model(self): self.translated_batches = [] def _token_count(self, text, target_lang, source_lang=None): del target_lang, source_lang return len(text) def _translate_batch(self, texts, target_lang, source_lang=None): del source_lang self.translated_batches.append(list(texts)) if target_lang == "zh": return [f"<{text.strip()}>" for text in texts] return [f"[{text.strip()}]" for text in texts] def test_local_backend_splits_oversized_text_before_translation(): backend = _SegmentingMarianBackend( 
name="opus-mt-en-zh", model_id="Helsinki-NLP/opus-mt-en-zh", model_dir="./models/translation/Helsinki-NLP/opus-mt-en-zh", device="cpu", torch_dtype="float32", batch_size=8, max_input_length=24, max_new_tokens=24, num_beams=1, source_langs=["en"], target_langs=["zh"], ) text = ( "This soft cotton dress is breathable and lightweight, " "works well for spring travel and everyday wear, " "and should be split on natural clause boundaries when it gets too long." ) result = backend.translate(text, source_lang="en", target_lang="zh") assert result is not None all_segments = [piece for batch in backend.translated_batches for piece in batch] assert len(all_segments) >= 2 assert all(len(batch) <= backend.batch_size for batch in backend.translated_batches) assert all(len(piece) <= 16 for piece in all_segments) assert result == "".join(f"<{piece.strip()}>" for piece in all_segments) def test_local_backend_batches_after_segmentation(): backend = _SegmentingMarianBackend( name="opus-mt-en-zh", model_id="Helsinki-NLP/opus-mt-en-zh", model_dir="./models/translation/Helsinki-NLP/opus-mt-en-zh", device="cpu", torch_dtype="float32", batch_size=4, max_input_length=24, max_new_tokens=24, num_beams=1, source_langs=["en"], target_langs=["zh"], ) texts = [ "alpha beta gamma delta, epsilon zeta eta theta, iota kappa lambda mu.", "nu xi omicron pi, rho sigma tau upsilon, phi chi psi omega.", "dress shirt coat pants, socks shoes belt scarf, hat gloves bag watch.", ] result = backend.translate(texts, source_lang="en", target_lang="zh") assert isinstance(result, list) assert len(result) == 3 assert len(backend.translated_batches) >= 2 assert all(len(batch) <= backend.batch_size for batch in backend.translated_batches) assert sum(len(batch) for batch in backend.translated_batches) > backend.batch_size assert all(item is not None for item in result) def test_local_backend_logs_segmentation_and_inference_batches(caplog): backend = _SegmentingMarianBackend( name="opus-mt-en-zh", model_id="Helsinki-NLP/opus-mt-en-zh", model_dir="./models/translation/Helsinki-NLP/opus-mt-en-zh", device="cpu", torch_dtype="float32", batch_size=2, max_input_length=24, max_new_tokens=24, num_beams=1, source_langs=["en"], target_langs=["zh"], ) texts = [ "one two three four, five six seven eight, nine ten eleven twelve.", "thirteen fourteen fifteen sixteen, seventeen eighteen nineteen twenty.", ] with caplog.at_level(logging.INFO): backend.translate(texts, source_lang="en", target_lang="zh") messages = [record.getMessage() for record in caplog.records] assert any(message.startswith("Translation segmentation summary |") for message in messages) inference_logs = [ message for message in messages if message.startswith("Translation inference batch |") ] assert len(inference_logs) >= 2