query_parser.py
"""
Query parser - main module for query processing.
Handles query rewriting, translation, and embedding generation.
"""

from typing import Dict, List, Optional, Any

import numpy as np

from config import CustomerConfig, QueryConfig
from embeddings import BgeEncoder
from .language_detector import LanguageDetector
from .translator import Translator
from .query_rewriter import QueryRewriter, QueryNormalizer


class ParsedQuery:
    """Container for parsed query results."""

    def __init__(
        self,
        original_query: str,
        normalized_query: str,
        rewritten_query: Optional[str] = None,
        detected_language: str = "unknown",
        translations: Optional[Dict[str, str]] = None,
        query_vector: Optional[np.ndarray] = None,
        domain: str = "default"
    ):
        self.original_query = original_query
        self.normalized_query = normalized_query
        self.rewritten_query = rewritten_query or normalized_query
        self.detected_language = detected_language
        self.translations = translations or {}
        self.query_vector = query_vector
        self.domain = domain

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary representation."""
        result = {
            "original_query": self.original_query,
            "normalized_query": self.normalized_query,
            "rewritten_query": self.rewritten_query,
            "detected_language": self.detected_language,
            "translations": self.translations,
            "domain": self.domain,
            "has_vector": self.query_vector is not None
        }
        return result
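
# Illustrative shape of ParsedQuery("nike shoes", "nike shoes").to_dict()
# with all optional arguments left at their defaults:
# {
#     "original_query": "nike shoes",
#     "normalized_query": "nike shoes",
#     "rewritten_query": "nike shoes",   # falls back to normalized_query
#     "detected_language": "unknown",
#     "translations": {},
#     "domain": "default",
#     "has_vector": False
# }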


class QueryParser:
    """
    Main query parser that processes queries through multiple stages:
    1. Normalization
    2. Query rewriting (brand/category mappings, synonyms)
    3. Language detection
    4. Translation to target languages
    5. Text embedding generation (for semantic search)
    """

    def __init__(
        self,
        config: CustomerConfig,
        text_encoder: Optional[BgeEncoder] = None,
        translator: Optional[Translator] = None
    ):
        """
        Initialize query parser.

        Args:
            config: Customer configuration
            text_encoder: Text embedding encoder (lazy loaded if not provided)
            translator: Translator instance (lazy loaded if not provided)
        """
        self.config = config
        self.query_config = config.query_config
        self._text_encoder = text_encoder
        self._translator = translator

        # Initialize components
        self.normalizer = QueryNormalizer()
        self.language_detector = LanguageDetector()
        self.rewriter = QueryRewriter(self.query_config.rewrite_dictionary)

    @property
    def text_encoder(self) -> BgeEncoder:
        """Lazy load text encoder."""
        if self._text_encoder is None and self.query_config.enable_text_embedding:
            print("[QueryParser] Initializing text encoder...")
            self._text_encoder = BgeEncoder()
        return self._text_encoder

    @property
    def translator(self) -> Translator:
        """Lazy load translator."""
        if self._translator is None and self.query_config.enable_translation:
            print("[QueryParser] Initializing translator...")
            self._translator = Translator(
                api_key=self.query_config.translation_api_key,
                use_cache=True
            )
        return self._translator

    def parse(self, query: str, generate_vector: bool = True, context: Optional[Any] = None) -> ParsedQuery:
        """
        Parse query through all processing stages.

        Args:
            query: Raw query string
            generate_vector: Whether to generate query embedding
            context: Optional request context for tracking and logging

        Returns:
            ParsedQuery object with all processing results
        """
        # Initialize logger if context provided
        logger = context.logger if context else None
        if logger:
            logger.info(
                f"Starting query parsing | original query: '{query}' | generate vector: {generate_vector}",
                extra={'reqid': context.reqid, 'uid': context.uid}
            )

        # Use print statements for backward compatibility if no context
        def log_info(msg):
            if logger:
                logger.info(msg, extra={'reqid': context.reqid, 'uid': context.uid})
            else:
                print(f"[QueryParser] {msg}")

        def log_debug(msg):
            if logger:
                logger.debug(msg, extra={'reqid': context.reqid, 'uid': context.uid})
            else:
                print(f"[QueryParser] {msg}")

        # Stage 1: Normalize
        normalized = self.normalizer.normalize(query)
        log_debug(f"Normalization complete | '{query}' -> '{normalized}'")
        if context:
            context.store_intermediate_result('normalized_query', normalized)

        # Extract domain if present (e.g., "brand:Nike" -> domain="brand", query="Nike")
        domain, query_text = self.normalizer.extract_domain_query(normalized)
        log_debug(f"Domain extraction | domain: '{domain}', query: '{query_text}'")
        if context:
            context.store_intermediate_result('extracted_domain', domain)
            context.store_intermediate_result('domain_query', query_text)

        # Stage 2: Query rewriting
        rewritten = None
        if self.query_config.enable_query_rewrite:
            rewritten = self.rewriter.rewrite(query_text)
            if rewritten != query_text:
                log_info(f"Query rewritten | '{query_text}' -> '{rewritten}'")
                query_text = rewritten
                if context:
                    context.store_intermediate_result('rewritten_query', rewritten)
                    context.add_warning(f"Query was rewritten: {query_text}")

        # Stage 3: Language detection
        detected_lang = self.language_detector.detect(query_text)
        log_info(f"Language detection | detected language: {detected_lang}")
        if context:
            context.store_intermediate_result('detected_language', detected_lang)

        # Stage 4: Translation
        translations = {}
        if self.query_config.enable_translation:
            try:
                # Determine target languages for translation.
                # If the domain has a language_field_mapping, only translate to
                # languages present in the mapping; otherwise use all supported languages.
                target_langs_for_translation = self.query_config.supported_languages

                # Check whether the domain has a language_field_mapping
                domain_config = next(
                    (idx for idx in self.config.indexes if idx.name == domain),
                    None
                )
                if domain_config and domain_config.language_field_mapping:
                    # Only translate to languages that exist in the mapping
                    available_languages = set(domain_config.language_field_mapping.keys())
                    target_langs_for_translation = [
                        lang for lang in self.query_config.supported_languages
                        if lang in available_languages
                    ]
                    log_debug(f"Domain '{domain}' has a language field mapping; translating to: {target_langs_for_translation}")

                target_langs = self.translator.get_translation_needs(
                    detected_lang,
                    target_langs_for_translation
                )
                if target_langs:
                    log_info(f"Starting translation | source language: {detected_lang} | target languages: {target_langs}")
                    translations = self.translator.translate_multi(
                        query_text,
                        target_langs,
                        source_lang=detected_lang
                    )
                    log_info(f"Translation complete | results: {translations}")
                    if context:
                        context.store_intermediate_result('translations', translations)
                        for lang, translation in translations.items():
                            if translation:
                                context.store_intermediate_result(f'translation_{lang}', translation)
            except Exception as e:
                error_msg = f"Translation failed | error: {str(e)}"
                log_info(error_msg)
                if context:
                    context.add_warning(error_msg)

        # Stage 5: Text embedding
        query_vector = None
        if (generate_vector and
                self.query_config.enable_text_embedding and
                domain == "default"):  # Only generate vector for default domain
            try:
                log_debug("Generating query vector")
                query_vector = self.text_encoder.encode([query_text])[0]
                log_debug(f"Query vector generated | shape: {query_vector.shape}")
                if context:
                    context.store_intermediate_result('query_vector_shape', query_vector.shape)
            except Exception as e:
                error_msg = f"Query vector generation failed | error: {str(e)}"
                log_info(error_msg)
                if context:
                    context.add_warning(error_msg)

        # Build result
        result = ParsedQuery(
            original_query=query,
            normalized_query=normalized,
            rewritten_query=rewritten,
            detected_language=detected_lang,
            translations=translations,
            query_vector=query_vector,
            domain=domain
        )

        if logger:
            logger.info(
                f"Query parsing complete | original query: '{query}' | final query: '{rewritten or query_text}' | "
                f"language: {detected_lang} | domain: {domain} | "
                f"translations: {len(translations)} | vector: {'yes' if query_vector is not None else 'no'}",
                extra={'reqid': context.reqid, 'uid': context.uid}
            )
        else:
            print("[QueryParser] Parsing complete")

        return result

    def get_search_queries(self, parsed_query: ParsedQuery) -> List[str]:
        """
        Get list of queries to search (original + translations).

        Args:
            parsed_query: Parsed query object

        Returns:
            List of query strings to search
        """
        queries = [parsed_query.rewritten_query]

        # Add translations
        for lang, translation in parsed_query.translations.items():
            if translation and translation != parsed_query.rewritten_query:
                queries.append(translation)

        return queries

    def update_rewrite_rules(self, rules: Dict[str, str]) -> None:
        """
        Update query rewrite rules.

        Args:
            rules: Dictionary of pattern -> replacement mappings
        """
        for pattern, replacement in rules.items():
            self.rewriter.add_rule(pattern, replacement)

    def get_rewrite_rules(self) -> Dict[str, str]:
        """Get current rewrite rules."""
        return self.rewriter.get_rules()
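

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): wires the parser stages together
    # end to end. CustomerConfig.from_file() and the config path are hypothetical
    # placeholders; substitute however CustomerConfig is actually constructed.
    config = CustomerConfig.from_file("configs/default.yaml")  # hypothetical loader
    parser = QueryParser(config)

    # Parse a raw query without generating an embedding vector.
    parsed = parser.parse("brand:Nike running shoes", generate_vector=False)
    print(parsed.to_dict())

    # Fan the rewritten query plus any non-duplicate translations out to search.
    for q in parser.get_search_queries(parsed):
        print("search query:", q)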