"""
Query parser - main module for query processing.
Handles query rewriting, translation, and embedding generation.
"""
from typing import Dict, List, Optional, Any, Tuple
import numpy as np
import logging
import hanlp
from concurrent.futures import Future, ThreadPoolExecutor, as_completed
from embeddings import BgeEncoder
from config import SearchConfig
from .language_detector import LanguageDetector
from .translator import Translator
from .query_rewriter import QueryRewriter, QueryNormalizer
logger = logging.getLogger(__name__)
class ParsedQuery:
"""Container for parsed query results."""
def __init__(
self,
original_query: str,
normalized_query: str,
rewritten_query: Optional[str] = None,
detected_language: Optional[str] = None,
        translations: Optional[Dict[str, str]] = None,
query_vector: Optional[np.ndarray] = None,
domain: str = "default",
keywords: str = "",
token_count: int = 0,
is_short_query: bool = False,
is_long_query: bool = False
):
self.original_query = original_query
self.normalized_query = normalized_query
self.rewritten_query = rewritten_query or normalized_query
self.detected_language = detected_language
self.translations = translations or {}
self.query_vector = query_vector
self.domain = domain
# Query analysis fields
self.keywords = keywords
self.token_count = token_count
self.is_short_query = is_short_query
self.is_long_query = is_long_query
def to_dict(self) -> Dict[str, Any]:
"""Convert to dictionary representation."""
result = {
"original_query": self.original_query,
"normalized_query": self.normalized_query,
"rewritten_query": self.rewritten_query,
"detected_language": self.detected_language,
"translations": self.translations,
"domain": self.domain,
"has_vector": self.query_vector is not None
}
return result
class QueryParser:
"""
Main query parser that processes queries through multiple stages:
1. Normalization
2. Query rewriting (brand/category mappings, synonyms)
3. Language detection
4. Translation to target languages
5. Text embedding generation (for semantic search)
"""
def __init__(
self,
config: SearchConfig,
text_encoder: Optional[BgeEncoder] = None,
translator: Optional[Translator] = None
):
"""
Initialize query parser.
Args:
config: SearchConfig instance
text_encoder: Text embedding encoder (lazy loaded if not provided)
translator: Translator instance (lazy loaded if not provided)
"""
self.config = config
self._text_encoder = text_encoder
self._translator = translator
# Initialize components
self.normalizer = QueryNormalizer()
self.language_detector = LanguageDetector()
self.rewriter = QueryRewriter(config.query_config.rewrite_dictionary)
# Initialize HanLP components at startup
logger.info("Initializing HanLP components...")
self._tok = hanlp.load(hanlp.pretrained.tok.CTB9_TOK_ELECTRA_BASE_CRF)
self._tok.config.output_spans = True
self._pos_tag = hanlp.load(hanlp.pretrained.pos.CTB9_POS_ELECTRA_SMALL)
logger.info("HanLP components initialized")
@property
def text_encoder(self) -> BgeEncoder:
"""Lazy load text encoder."""
if self._text_encoder is None and self.config.query_config.enable_text_embedding:
logger.info("Initializing text encoder (lazy load)...")
self._text_encoder = BgeEncoder()
return self._text_encoder
@property
def translator(self) -> Translator:
"""Lazy load translator."""
if self._translator is None:
logger.info("Initializing translator (lazy load)...")
self._translator = Translator(
api_key=self.config.query_config.translation_api_key,
use_cache=True,
glossary_id=self.config.query_config.translation_glossary_id,
translation_context=self.config.query_config.translation_context
)
return self._translator
def _extract_keywords(self, query: str) -> str:
"""Extract keywords (nouns with length > 1) from query."""
tok_result = self._tok(query)
if not tok_result:
return ""
words = [x[0] for x in tok_result]
pos_tags = self._pos_tag(words)
keywords = []
for word, pos in zip(words, pos_tags):
if len(word) > 1 and pos.startswith('N'):
keywords.append(word)
return " ".join(keywords)
def _get_token_count(self, query: str) -> int:
"""Get token count using HanLP."""
tok_result = self._tok(query)
return len(tok_result) if tok_result else 0
    def _analyze_query_type(self, query: str, token_count: int) -> Tuple[bool, bool]:
        """Classify query length, returning (is_short_query, is_long_query)."""
is_quoted = query.startswith('"') and query.endswith('"')
is_short = is_quoted or ((token_count <= 2 or len(query) <= 4) and ' ' not in query)
is_long = token_count >= 4
return is_short, is_long
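    # Worked examples of the heuristic above (token counts are illustrative):
    #   '"iphone case"'             -> quoted                  => short=True,  long=False
    #   '鞋'                        -> 1 token, <=4 chars      => short=True,  long=False
    #   'red shoes'                 -> contains a space        => short=False, long=False
    #   'red running shoes for men' -> >= 4 tokens             => short=False, long=True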
def parse(
self,
query: str,
tenant_id: Optional[str] = None,
generate_vector: bool = True,
context: Optional[Any] = None
) -> ParsedQuery:
"""
Parse query through all processing stages.
Args:
            query: Raw query string
            tenant_id: Tenant identifier used to look up per-tenant translation settings
generate_vector: Whether to generate query embedding
context: Optional request context for tracking and logging
Returns:
ParsedQuery object with all processing results
"""
        # Use the request-scoped logger when a context provides one;
        # otherwise fall back to the module-level logger.
        ctx_logger = context.logger if context and hasattr(context, 'logger') else None
        if ctx_logger:
            ctx_logger.info(
                f"Query parsing started | original query: '{query}' | generate vector: {generate_vector}",
                extra={'reqid': context.reqid, 'uid': context.uid}
            )
        def log_info(msg):
            if ctx_logger:
                ctx_logger.info(msg, extra={'reqid': context.reqid, 'uid': context.uid})
            else:
                logger.info(msg)
        def log_debug(msg):
            if ctx_logger:
                ctx_logger.debug(msg, extra={'reqid': context.reqid, 'uid': context.uid})
            else:
                logger.debug(msg)
# Stage 1: Normalize
normalized = self.normalizer.normalize(query)
log_debug(f"标准化完成 | '{query}' -> '{normalized}'")
if context:
context.store_intermediate_result('normalized_query', normalized)
# Extract domain if present (e.g., "brand:Nike" -> domain="brand", query="Nike")
domain, query_text = self.normalizer.extract_domain_query(normalized)
log_debug(f"域提取 | 域: '{domain}', 查询: '{query_text}'")
if context:
context.store_intermediate_result('extracted_domain', domain)
context.store_intermediate_result('domain_query', query_text)
# Stage 2: Query rewriting
rewritten = None
if self.config.query_config.rewrite_dictionary: # Enable rewrite if dictionary exists
            rewritten = self.rewriter.rewrite(query_text)
            if rewritten != query_text:
                log_info(f"Query rewritten | '{query_text}' -> '{rewritten}'")
                query_text = rewritten
                if context:
                    context.store_intermediate_result('rewritten_query', rewritten)
                    context.add_warning(f"Query was rewritten: {query_text}")
# Stage 3: Language detection
detected_lang = self.language_detector.detect(query_text)
# Use default language if detection failed (None or "unknown")
if not detected_lang or detected_lang == "unknown":
detected_lang = self.config.query_config.default_language
log_info(f"语言检测 | 检测到语言: {detected_lang}")
if context:
context.store_intermediate_result('detected_language', detected_lang)
# Stage 4: Translation (with async support and conditional waiting)
translations = {}
translation_futures = {}
try:
            # Decide translation target languages based on the tenant configuration
from config.tenant_config_loader import get_tenant_config_loader
tenant_loader = get_tenant_config_loader()
tenant_cfg = tenant_loader.get_tenant_config(tenant_id or "default")
translate_to_zh = bool(tenant_cfg.get("translate_to_zh"))
translate_to_en = bool(tenant_cfg.get("translate_to_en"))
target_langs_for_translation = []
if translate_to_zh:
target_langs_for_translation.append('zh')
if translate_to_en:
target_langs_for_translation.append('en')
            # If this tenant has no translation direction enabled, skip the translation stage entirely
if target_langs_for_translation:
target_langs = [lang for lang in target_langs_for_translation if detected_lang != lang]
if target_langs:
# Use e-commerce context for better disambiguation
translation_context = self.config.query_config.translation_context
                    # For query translation, prefer the query-specific prompt and
                    # fall back to the default prompt
                    query_prompt = self.config.query_config.translation_prompts.get('query_zh') or \
                        self.config.query_config.translation_prompts.get('default_zh')
# Determine if we need to wait for translation results
# If detected_lang is neither 'en' nor 'zh', we must wait for translation
need_wait_translation = detected_lang not in ['en', 'zh']
if need_wait_translation:
# Use async method that returns Futures, so we can wait for results
translation_results = self.translator.translate_multi_async(
query_text,
target_langs,
source_lang=detected_lang,
context=translation_context,
prompt=query_prompt
)
# Separate cached results and futures
for lang, result in translation_results.items():
if isinstance(result, Future):
translation_futures[lang] = result
else:
translations[lang] = result
else:
# Use async mode: returns cached translations immediately, missing ones translated in background
translations = self.translator.translate_multi(
query_text,
target_langs,
source_lang=detected_lang,
context=translation_context,
async_mode=True,
prompt=query_prompt
)
# Filter out None values (missing translations that are being processed async)
translations = {k: v for k, v in translations.items() if v is not None}
                    if translations:
                        log_info(f"Translation complete (cache hit) | results: {translations}")
                    if translation_futures:
                        log_debug(f"Translation in progress, waiting for results... | languages: {list(translation_futures.keys())}")
if context:
context.store_intermediate_result('translations', translations)
for lang, translation in translations.items():
if translation:
context.store_intermediate_result(f'translation_{lang}', translation)
except Exception as e:
error_msg = f"翻译失败 | 错误: {str(e)}"
log_info(error_msg)
if context:
context.add_warning(error_msg)
# Stage 5: Query analysis (keywords, token count, query type)
keywords = self._extract_keywords(query_text)
token_count = self._get_token_count(query_text)
is_short_query, is_long_query = self._analyze_query_type(query_text, token_count)
log_debug(f"查询分析 | 关键词: {keywords} | token数: {token_count} | "
f"短查询: {is_short_query} | 长查询: {is_long_query}")
if context:
context.store_intermediate_result('keywords', keywords)
context.store_intermediate_result('token_count', token_count)
context.store_intermediate_result('is_short_query', is_short_query)
context.store_intermediate_result('is_long_query', is_long_query)
# Stage 6: Text embedding (only for non-short queries) - async execution
query_vector = None
embedding_future = None
should_generate_embedding = (
generate_vector and
self.config.query_config.enable_text_embedding and
domain == "default" and
not is_short_query
)
encoding_executor = None
if should_generate_embedding:
try:
log_debug("开始生成查询向量(异步)")
# Submit encoding task to thread pool for async execution
encoding_executor = ThreadPoolExecutor(max_workers=1)
embedding_future = encoding_executor.submit(
lambda: self.text_encoder.encode([query_text])[0]
)
except Exception as e:
error_msg = f"查询向量生成任务提交失败 | 错误: {str(e)}"
log_info(error_msg)
if context:
context.add_warning(error_msg)
encoding_executor = None
embedding_future = None
# Wait for all async tasks to complete (translation and embedding)
if translation_futures or embedding_future:
log_debug("等待异步任务完成...")
# Collect all futures with their identifiers
all_futures = []
future_to_lang = {}
for lang, future in translation_futures.items():
all_futures.append(future)
future_to_lang[future] = ('translation', lang)
if embedding_future:
all_futures.append(embedding_future)
future_to_lang[embedding_future] = ('embedding', None)
# Wait for all futures to complete
for future in as_completed(all_futures):
task_type, lang = future_to_lang[future]
try:
result = future.result()
if task_type == 'translation':
if result:
translations[lang] = result
log_info(f"翻译完成 | {lang}: {result}")
if context:
context.store_intermediate_result(f'translation_{lang}', result)
elif task_type == 'embedding':
query_vector = result
log_debug(f"查询向量生成完成 | 形状: {query_vector.shape}")
if context:
context.store_intermediate_result('query_vector_shape', query_vector.shape)
except Exception as e:
if task_type == 'translation':
error_msg = f"翻译失败 | 语言: {lang} | 错误: {str(e)}"
else:
error_msg = f"查询向量生成失败 | 错误: {str(e)}"
log_info(error_msg)
if context:
context.add_warning(error_msg)
# Clean up encoding executor
if encoding_executor:
encoding_executor.shutdown(wait=False)
# Update translations in context after all are complete
if translations and context:
context.store_intermediate_result('translations', translations)
# Build result
result = ParsedQuery(
original_query=query,
normalized_query=normalized,
rewritten_query=rewritten,
detected_language=detected_lang,
translations=translations,
query_vector=query_vector,
domain=domain,
keywords=keywords,
token_count=token_count,
is_short_query=is_short_query,
is_long_query=is_long_query
)
        if context and hasattr(context, 'logger'):
            context.logger.info(
                f"Query parsing complete | original query: '{query}' | final query: '{rewritten or query_text}' | "
                f"language: {detected_lang} | domain: {domain} | "
                f"translations: {len(translations)} | vector: {'yes' if query_vector is not None else 'no'}",
                extra={'reqid': context.reqid, 'uid': context.uid}
            )
        else:
            logger.info(
                f"Query parsing complete | original query: '{query}' | final query: '{rewritten or query_text}' | "
                f"language: {detected_lang} | domain: {domain}"
            )
return result
def get_search_queries(self, parsed_query: ParsedQuery) -> List[str]:
"""
Get list of queries to search (original + translations).
Args:
parsed_query: Parsed query object
Returns:
List of query strings to search
"""
queries = [parsed_query.rewritten_query]
# Add translations
for lang, translation in parsed_query.translations.items():
if translation and translation != parsed_query.rewritten_query:
queries.append(translation)
return queries
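    # For example, a ParsedQuery with rewritten_query "nike 跑鞋" and
    # translations {'en': 'nike running shoes'} yields:
    #   ["nike 跑鞋", "nike running shoes"]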
def update_rewrite_rules(self, rules: Dict[str, str]) -> None:
"""
Update query rewrite rules.
Args:
rules: Dictionary of pattern -> replacement mappings
"""
for pattern, replacement in rules.items():
self.rewriter.add_rule(pattern, replacement)
def get_rewrite_rules(self) -> Dict[str, str]:
"""Get current rewrite rules."""
return self.rewriter.get_rules()
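# Minimal smoke-test sketch, not part of the module's public surface.
# The no-arg SearchConfig() construction is an assumption here; adapt it
# to however configs are actually built in this project.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    config = SearchConfig()  # hypothetical construction, for illustration only
    parser = QueryParser(config)
    # Skip embedding generation so the sketch runs without a GPU/encoder
    parsed = parser.parse("brand:Nike running shoes", generate_vector=False)
    print(parsed.to_dict())
    print(parser.get_search_queries(parsed))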