"""
Query parser - main module for query processing.

Handles query rewriting, translation, and embedding generation.
"""

import logging
from typing import Any, Dict, List, Optional, Tuple

import hanlp
import numpy as np

from embeddings import BgeEncoder
from config import SearchConfig
from .language_detector import LanguageDetector
from .translator import Translator
from .query_rewriter import QueryRewriter, QueryNormalizer

logger = logging.getLogger(__name__)


class ParsedQuery:
    """Container for parsed query results."""

    def __init__(
        self,
        original_query: str,
        normalized_query: str,
        rewritten_query: Optional[str] = None,
        detected_language: str = "unknown",
        translations: Optional[Dict[str, str]] = None,
        query_vector: Optional[np.ndarray] = None,
        domain: str = "default",
        keywords: str = "",
        token_count: int = 0,
        is_short_query: bool = False,
        is_long_query: bool = False
    ):
        self.original_query = original_query
        self.normalized_query = normalized_query
        self.rewritten_query = rewritten_query or normalized_query
        self.detected_language = detected_language
        self.translations = translations or {}
        self.query_vector = query_vector
        self.domain = domain
        # Query analysis fields
        self.keywords = keywords
        self.token_count = token_count
        self.is_short_query = is_short_query
        self.is_long_query = is_long_query

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary representation."""
        result = {
            "original_query": self.original_query,
            "normalized_query": self.normalized_query,
            "rewritten_query": self.rewritten_query,
            "detected_language": self.detected_language,
            "translations": self.translations,
            "domain": self.domain,
            "keywords": self.keywords,
            "token_count": self.token_count,
            "is_short_query": self.is_short_query,
            "is_long_query": self.is_long_query,
            "has_vector": self.query_vector is not None
        }
        return result


class QueryParser:
    """
    Main query parser that processes queries through multiple stages:
    1. Normalization
    2. Query rewriting (brand/category mappings, synonyms)
    3. Language detection
    4. Translation to target languages
    5. Text embedding generation (for semantic search)
    """

    def __init__(
        self,
        config: SearchConfig,
        text_encoder: Optional[BgeEncoder] = None,
        translator: Optional[Translator] = None
    ):
        """
        Initialize query parser.

        Args:
            config: SearchConfig instance
            text_encoder: Text embedding encoder (lazy loaded if not provided)
            translator: Translator instance (lazy loaded if not provided)
        """
        self.config = config
        self._text_encoder = text_encoder
        self._translator = translator

        # Initialize components
        self.normalizer = QueryNormalizer()
        self.language_detector = LanguageDetector()
        self.rewriter = QueryRewriter(config.query_config.rewrite_dictionary)
        
        # Initialize HanLP components at startup
        logger.info("Initializing HanLP components...")
        self._tok = hanlp.load(hanlp.pretrained.tok.CTB9_TOK_ELECTRA_BASE_CRF)
        self._tok.config.output_spans = True
        self._pos_tag = hanlp.load(hanlp.pretrained.pos.CTB9_POS_ELECTRA_SMALL)
        logger.info("HanLP components initialized")

    @property
    def text_encoder(self) -> Optional[BgeEncoder]:
        """Lazy load text encoder (None when text embedding is disabled)."""
        if self._text_encoder is None and self.config.query_config.enable_text_embedding:
            logger.info("Initializing text encoder (lazy load)...")
            self._text_encoder = BgeEncoder()
        return self._text_encoder

    @property
    def translator(self) -> Optional[Translator]:
        """Lazy load translator (None when translation is disabled)."""
        if self._translator is None and self.config.query_config.enable_translation:
            logger.info("Initializing translator (lazy load)...")
            self._translator = Translator(
                api_key=self.config.query_config.translation_api_key,
                use_cache=True,
                glossary_id=self.config.query_config.translation_glossary_id,
                translation_context=self.config.query_config.translation_context
            )
        return self._translator
    
    def _extract_keywords(self, query: str) -> str:
        """Extract keywords (nouns longer than one character) from the query."""
        tok_result = self._tok(query)
        if not tok_result:
            return ""

        # With output_spans=True the tokenizer returns (word, start, end)
        # tuples, so keep only the surface form.
        words = [x[0] for x in tok_result]
        pos_tags = self._pos_tag(words)

        # Keep multi-character nouns (CTB tags NN/NR/NT all start with 'N').
        keywords = []
        for word, pos in zip(words, pos_tags):
            if len(word) > 1 and pos.startswith('N'):
                keywords.append(word)

        return " ".join(keywords)
    
    def _get_token_count(self, query: str) -> int:
        """Get token count using HanLP."""
        tok_result = self._tok(query)
        return len(tok_result) if tok_result else 0
    
    def _analyze_query_type(self, query: str, token_count: int) -> Tuple[bool, bool]:
        """Classify the query, returning (is_short_query, is_long_query)."""
        is_quoted = len(query) > 1 and query.startswith('"') and query.endswith('"')
        is_short = is_quoted or ((token_count <= 2 or len(query) <= 4) and ' ' not in query)
        is_long = token_count >= 4
        return is_short, is_long
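
    # Examples of the heuristics above (hypothetical inputs):
    #   '"nike"'             -> quoted                -> (short=True,  long=False)
    #   'iphone'             -> <= 2 tokens, no space -> (short=True,  long=False)
    #   a 4+ token sentence  -> token_count >= 4      -> (short=False, long=True)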

    def parse(self, query: str, generate_vector: bool = True, context: Optional[Any] = None) -> ParsedQuery:
        """
        Parse query through all processing stages.

        Args:
            query: Raw query string
            generate_vector: Whether to generate query embedding
            context: Optional request context for tracking and logging

        Returns:
            ParsedQuery object with all processing results
        """
        # Route logs through the request-scoped logger when a context is
        # provided, falling back to the module-level logger otherwise.
        def log_info(msg):
            if context and hasattr(context, 'logger'):
                context.logger.info(msg, extra={'reqid': context.reqid, 'uid': context.uid})
            else:
                logger.info(msg)

        def log_debug(msg):
            if context and hasattr(context, 'logger'):
                context.logger.debug(msg, extra={'reqid': context.reqid, 'uid': context.uid})
            else:
                logger.debug(msg)

        log_info(f"Starting query parse | original query: '{query}' | generate vector: {generate_vector}")

        # Stage 1: Normalize
        normalized = self.normalizer.normalize(query)
        log_debug(f"标准化完成 | '{query}' -> '{normalized}'")
        if context:
            context.store_intermediate_result('normalized_query', normalized)

        # Extract domain if present (e.g., "brand:Nike" -> domain="brand", query="Nike")
        domain, query_text = self.normalizer.extract_domain_query(normalized)
        log_debug(f"域提取 | 域: '{domain}', 查询: '{query_text}'")
        if context:
            context.store_intermediate_result('extracted_domain', domain)
            context.store_intermediate_result('domain_query', query_text)

        # Stage 2: Query rewriting
        rewritten = None
        if self.config.query_config.rewrite_dictionary:  # rewriting is enabled when a dictionary is configured
            rewritten = self.rewriter.rewrite(query_text)
            if rewritten != query_text:
                log_info(f"Query rewritten | '{query_text}' -> '{rewritten}'")
                query_text = rewritten
                if context:
                    context.store_intermediate_result('rewritten_query', rewritten)
                    context.add_warning(f"Query was rewritten: {rewritten}")

        # Stage 3: Language detection
        detected_lang = self.language_detector.detect(query_text)
        log_info(f"语言检测 | 检测到语言: {detected_lang}")
        if context:
            context.store_intermediate_result('detected_language', detected_lang)

        # Stage 4: Translation (async mode - only returns cached results, missing ones translated in background)
        translations = {}
        if self.config.query_config.enable_translation:
            try:
                # Determine target languages for translation
                # Simplified: always translate to Chinese and English
                target_langs_for_translation = ['zh', 'en']

                target_langs = self.translator.get_translation_needs(
                    detected_lang,
                    target_langs_for_translation
                )

                if target_langs:
                    # Use e-commerce context for better disambiguation
                    translation_context = 'e-commerce product search'
                    # Use async mode: returns cached translations immediately, missing ones translated in background
                    translations = self.translator.translate_multi(
                        query_text,
                        target_langs,
                        source_lang=detected_lang,
                        context=translation_context,
                        async_mode=True
                    )
                    # Filter out None values (missing translations that are being processed async)
                    translations = {k: v for k, v in translations.items() if v is not None}
                    
                    if translations:
                        log_info(f"Translation complete (cache hit) | results: {translations}")
                    else:
                        log_debug("No cached translations; translating in background...")
                    
                    if context:
                        context.store_intermediate_result('translations', translations)
                        for lang, translation in translations.items():
                            if translation:
                                context.store_intermediate_result(f'translation_{lang}', translation)

            except Exception as e:
                error_msg = f"Translation failed | error: {e}"
                log_info(error_msg)
                if context:
                    context.add_warning(error_msg)

        # Stage 5: Query analysis (keywords, token count, query type)
        keywords = self._extract_keywords(query_text)
        token_count = self._get_token_count(query_text)
        is_short_query, is_long_query = self._analyze_query_type(query_text, token_count)
        
        log_debug(f"查询分析 | 关键词: {keywords} | token数: {token_count} | "
                 f"短查询: {is_short_query} | 长查询: {is_long_query}")
        if context:
            context.store_intermediate_result('keywords', keywords)
            context.store_intermediate_result('token_count', token_count)
            context.store_intermediate_result('is_short_query', is_short_query)
            context.store_intermediate_result('is_long_query', is_long_query)
        
        # Stage 6: Text embedding (only for non-short queries)
        query_vector = None
        should_generate_embedding = (
            generate_vector and
            self.config.query_config.enable_text_embedding and
            domain == "default" and
            not is_short_query
        )
        
        if should_generate_embedding:
            try:
                log_debug("开始生成查询向量")
                query_vector = self.text_encoder.encode([query_text])[0]
                log_debug(f"查询向量生成完成 | 形状: {query_vector.shape}")
                if context:
                    context.store_intermediate_result('query_vector_shape', query_vector.shape)
            except Exception as e:
                error_msg = f"查询向量生成失败 | 错误: {str(e)}"
                log_info(error_msg)
                if context:
                    context.add_warning(error_msg)

        # Build result
        result = ParsedQuery(
            original_query=query,
            normalized_query=normalized,
            rewritten_query=rewritten or query_text,  # final query text after domain extraction and rewriting
            detected_language=detected_lang,
            translations=translations,
            query_vector=query_vector,
            domain=domain,
            keywords=keywords,
            token_count=token_count,
            is_short_query=is_short_query,
            is_long_query=is_long_query
        )

        log_info(
            f"Query parse complete | original: '{query}' | final: '{result.rewritten_query}' | "
            f"language: {detected_lang} | domain: {domain} | "
            f"translations: {len(translations)} | vector: {'yes' if query_vector is not None else 'no'}"
        )

        return result

    def get_search_queries(self, parsed_query: ParsedQuery) -> List[str]:
        """
        Get the list of queries to search (rewritten query plus translations).

        Args:
            parsed_query: Parsed query object

        Returns:
            List of query strings to search
        """
        queries = [parsed_query.rewritten_query]

        # Add translations that differ from the query itself
        for translation in parsed_query.translations.values():
            if translation and translation != parsed_query.rewritten_query:
                queries.append(translation)

        return queries
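
    # Example: with rewritten_query "运动鞋" and translations
    # {"en": "sports shoes", "zh": "运动鞋"}, this returns
    # ["运动鞋", "sports shoes"]; the "zh" entry duplicates the query
    # itself and is skipped.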

    def update_rewrite_rules(self, rules: Dict[str, str]) -> None:
        """
        Update query rewrite rules.

        Args:
            rules: Dictionary of pattern -> replacement mappings
        """
        for pattern, replacement in rules.items():
            self.rewriter.add_rule(pattern, replacement)

    def get_rewrite_rules(self) -> Dict[str, str]:
        """Get current rewrite rules."""
        return self.rewriter.get_rules()
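

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the service wiring. It assumes
    # SearchConfig() is constructible with defaults and that the HanLP
    # pretrained models are available locally; adjust for your environment.
    logging.basicConfig(level=logging.INFO)
    parser = QueryParser(SearchConfig())
    parsed = parser.parse("brand:Nike 跑步鞋", generate_vector=False)
    print(parsed.to_dict())
    print(parser.get_search_queries(parsed))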