# test_reranker_qwen3_gguf_backend.py

from __future__ import annotations

import sys
import types

from reranker.backends import get_rerank_backend
from reranker.backends.qwen3_gguf import Qwen3GGUFRerankerBackend
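

# _FakeLlama stands in for llama_cpp.Llama so the tests run without a real
# GGUF model. It records constructor kwargs and eval() calls, and emits
# deterministic logits from which the backend under test presumably reads
# its yes/no scores.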
class _FakeLlama:
    def __init__(self, model_path: str | None = None, **kwargs):
        self.model_path = model_path
        self.kwargs = kwargs
        self.eval_logits = []
        self._tokens = []
        self.eval_call_count = 0

    @classmethod
    def from_pretrained(cls, repo_id: str, filename: str, local_dir=None, cache_dir=None, **kwargs):
        inst = cls(model_path=f"{repo_id}/{filename}", **kwargs)
        inst.repo_id = repo_id
        inst.filename = filename
        inst.local_dir = local_dir
        inst.cache_dir = cache_dir
        return inst
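
    # "yes" and "no" tokenize to the fixed ids 1 and 2 so eval() below can
    # plant their logits directly; any other text hashes per character.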
    def tokenize(self, text: bytes, add_bos: bool = False, special: bool = False):
        raw = text.decode("utf-8")
        if raw == "yes":
            return [1]
        if raw == "no":
            return [2]
        return [10 + (ord(ch) % 17) for ch in raw]

    def reset(self):
        self._tokens = []
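
    # eval() accumulates every token seen since the last reset()/load_state()
    # and derives the "yes" logit from their sum, so the yes logit (>= 3.0)
    # always beats the fixed "no" logit of 1.0.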
    def eval(self, prompt_tokens):
        self.eval_call_count += 1
        self._tokens.extend(prompt_tokens)
        pos = float(sum(self._tokens) % 11) + 3.0
        neg = 1.0
        logits = [0.0] * 64
        logits[1] = pos
        logits[2] = neg
        self.eval_logits = [logits]
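
    # Minimal stand-ins for the llama_cpp state API used by the backend's
    # reuse_query_state path: state here is just the token history.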
    def save_state(self):
        return list(self._tokens)

    def load_state(self, state):
        self._tokens = list(state)
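

# Inject the fake module so any `import llama_cpp` inside the backend picks
# up _FakeLlama instead of the real bindings.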
def _install_fake_llama_cpp(monkeypatch):
    fake_module = types.SimpleNamespace(Llama=_FakeLlama)
    monkeypatch.setitem(sys.modules, "llama_cpp", fake_module)
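

# The factory should build the GGUF backend from an explicit repo/filename
# config without network access (warmup disabled, llama_cpp faked).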
def test_qwen3_gguf_backend_factory_loads(monkeypatch):
    _install_fake_llama_cpp(monkeypatch)
    backend = get_rerank_backend(
        "qwen3_gguf",
        {
            "repo_id": "DevQuasar/Qwen.Qwen3-Reranker-4B-GGUF",
            "filename": "*Q8_0.gguf",
            "enable_warmup": False,
        },
    )
    assert isinstance(backend, Qwen3GGUFRerankerBackend)
    assert backend._backend_name == "qwen3_gguf"
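

# The 0.6B variant is constructed with no repo_id/filename in the config, so
# the defaults pinned by the assertions below must come from the backend itself.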
def test_qwen3_gguf_06b_backend_factory_loads(monkeypatch):
    _install_fake_llama_cpp(monkeypatch)
    backend = get_rerank_backend(
        "qwen3_gguf_06b",
        {
            "enable_warmup": False,
        },
    )
    assert isinstance(backend, Qwen3GGUFRerankerBackend)
    assert backend._backend_name == "qwen3_gguf_06b"
    assert backend._repo_id == "ggml-org/Qwen3-Reranker-0.6B-Q8_0-GGUF"
    assert backend._filename == "qwen3-reranker-0.6b-q8_0.gguf"
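

# Full scoring path: duplicate docs share one inference, blank/None docs are
# zero-scored, and the fake sees 3 eval() calls in total (presumably one for
# the shared query prefix plus one per unique doc under reuse_query_state).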
def test_qwen3_gguf_backend_score_with_meta_dedup_and_restore(monkeypatch):
    _install_fake_llama_cpp(monkeypatch)
    backend = Qwen3GGUFRerankerBackend(
        {
            "repo_id": "DevQuasar/Qwen.Qwen3-Reranker-4B-GGUF",
            "filename": "*Q8_0.gguf",
            "enable_warmup": False,
            "infer_batch_size": 2,
            "sort_by_doc_length": True,
            "reuse_query_state": True,
        }
    )
    scores, meta = backend.score_with_meta(
        query="wireless mouse",
        docs=["doc-a", "doc-b", "doc-a", "", " ", None],
        normalize=True,
    )
    assert len(scores) == 6
    assert scores[0] == scores[2]
    assert scores[0] > 0.5
    assert scores[1] > 0.5
    assert scores[3:] == [0.0, 0.0, 0.0]
    assert meta["input_docs"] == 6
    assert meta["usable_docs"] == 3
    assert meta["unique_docs"] == 2
    assert meta["backend"] == "qwen3_gguf"
    assert meta["inference_batches"] == 1
    assert meta["reuse_query_state"] is True
    assert backend._llm.eval_call_count == 3