diff --git a/config/languages.py b/config/languages.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/config/languages.py
diff --git a/context/request_context.py b/context/request_context.py
index 9f33a3b..708afb3 100644
--- a/context/request_context.py
+++ b/context/request_context.py
@@ -268,7 +268,6 @@ class RequestContext:
'rewritten_query': self.query_analysis.rewritten_query,
'detected_language': self.query_analysis.detected_language,
'domain': self.query_analysis.domain,
- 'has_vector': self.query_analysis.query_vector is not None,
'is_simple_query': self.query_analysis.is_simple_query
},
'performance': {
diff --git a/docs/recommendation/冷启动和数据稀缺场景下推荐系统.md b/docs/recommendation/冷启动和数据稀缺场景下推荐系统.md
new file mode 100644
index 0000000..abc7044
--- /dev/null
+++ b/docs/recommendation/冷启动和数据稀缺场景下推荐系统.md
@@ -0,0 +1,113 @@
+
+
+## 一、核心技术框架
+
+### 1. **贝叶斯方法体系 (Bayesian Methods)**
+
+独立站数据稀疏场景下,贝叶斯方法通过引入先验知识弥补数据不足:
+
+| 技术 | 原理 | 应用场景 |
+|------|------|----------|
+| **贝叶斯概率矩阵分解 (BPMF)** | 对用户-物品评分矩阵进行概率建模,引入高斯先验 | 新用户/新商品的评分预测 |
+| **贝叶斯个性化排序 (BPR)** | 利用贝叶斯推断优化排序损失函数 | 冷启动用户的推荐列表生成 |
+| **朴素贝叶斯分类器** | 基于用户属性/物品特征的联合概率分布 | 新用户初次进入时的品类推荐 |
+| **贝叶斯网络** | 构建用户特征-物品特征-评分的因果DAG | 融合内容信息解决冷启动 |
+
+**关键优势**:通过先验分布(如用户画像、商品类目)在数据稀缺时提供合理初始估计,随着数据积累自动更新为后验分布。
+
+### 2. **Contextual Bandit & LinUCB 算法族**
+
+这是解决**探索-利用(EE)**问题的黄金标准,特别适合独立站实时推荐:
+
+**LinUCB (Linear Upper Confidence Bound)**
+- **数学形式**:$a_t = \arg\max_{a \in A} (x_{t,a}^T \hat{\theta}_a + \alpha \sqrt{x_{t,a}^T (A_a)^{-1} x_{t,a}})$
+- **独立站适配**:
+ - 将商品作为"臂"(arm),用户特征( demographics、浏览历史)作为上下文(context)
+ - 对新商品/新用户自动增加探索项($\alpha$控制探索强度)
+ - 在线学习,每轮交互后立即更新参数,无需离线重训练
+
+**汤普森采样 (Thompson Sampling)**
+- 为每个候选物品维护一个奖励概率分布(通常用Beta分布)
+- **冷启动友好**:新物品初始分布较宽(不确定性高),天然获得更多探索机会
+- 淘宝、阿里飞猪等用于推荐理由和首图优选
+
+**EE-Net**
+- 双神经网络结构:一个网络学习利用(Exploitation),另一个网络学习探索潜力(Exploration)
+- 理论保证达到 $\mathcal{O}(\sqrt{T\log T})$ 的累积遗憾界
+
+### 3. **元学习 (Meta-Learning) / 小样本学习**
+
+针对独立站"数据少但用户/商品更新快"的特点:
+
+**MAML-based 推荐**
+- **核心思想**:学习"如何学习",即找到一个好的模型初始化参数,使得仅用极少数据(1-5个交互)就能快速适应新用户
+- **代表工作**:
+ - **MeLU** :为冷启动用户生成定制化嵌入向量,只需少量交互即可微调
+ - **MetaHIN** :结合异质信息网络(利用商品类目、品牌等side information),通过元路径增强冷启动效果
+ - **PAM** :针对流式数据的在线元学习,区分不同流行度级别的物品
+
+**优势**:
+- 将每个用户视为一个task,利用相似用户(如"25岁女性"、"户外运动爱好者")的先验知识
+- 支持**零历史个性化**(Zero-shot),即完全新用户也能基于人口统计学特征给出合理初始推荐
+
+---
+
+## 二、工程实现与SaaS方案
+
+### 1. **开源工具链**
+
+| 工具 | 适用场景 | 核心算法 |
+|------|----------|----------|
+| **Vowpal Wabbit** | 实时个性化、冷启动 | Contextual Bandit (LinUCB, Thompson Sampling),支持在线学习 |
+| **Microsoft Research CB Library** | 企业级A/B测试与推荐 | UCB系列算法,Azure集成 |
+| **Ray RLlib** | 多目标优化(点击+转化+停留) | 支持Multi-Armed Bandit与深度强化学习结合 |
+
+### 2. **独立站SaaS产品技术特点**
+
+**Nosto / Klaviyo**
+- 利用**实时行为触发**弥补数据量不足:如"浏览帐篷的用户最终购买防潮垫"的关联规则
+- **跨站数据聚合**:SaaS形态允许在保护隐私前提下利用同类独立站的匿名化行为模式(联邦学习思想)
+
+**技术组合**:
+```
+冷启动期(0-3个月):Meta-Learning初始化 + Contextual Bandit探索
+增长期(3-6个月):Bayesian深度学习 + 联邦学习跨域增强
+成熟期:标准协同过滤 + 在线学习微调
+```
+
+---
+
+## 三、独立站特化的技术架构建议
+
+### 1. **分层冷启动策略**
+
+**数据层 - Embedding初始化三部曲**:
+1. **分桶共享** (Bucket Shared Embedding):按"性别+年龄段"分桶,同桶用户共享初始向量
+2. **Look-alike老带新**:找到最相似的K个老用户,平均其嵌入作为新用户初始值
+3. **元学习生成**:使用MAML训练一个生成器,输入用户画像特征,输出个性化初始Embedding
+
+**模型层 - Shortcut连接**:
+- 将`is_new_user`特征直接连接到DNN末层(Logits层),强迫模型区分新老用户,避免行为序列信号过强淹没冷启动特征
+
+### 2. **多行为隐式反馈建模**
+
+独立站通常只有点击/加购等隐式反馈,没有显式评分:
+- **行为强度分层**:将点击(弱)、加购(中)、购买(强)作为不同置信度的正样本
+- **Bandit建模**:将物品类别作为arm(而非单个item),降低arm数量,缓解计算复杂度
+
+### 3. **隐私保护的联邦增强**
+
+针对独立站数据孤岛问题:
+- **跨域联邦学习**:多个独立站联合训练共享商品嵌入,本地保留用户数据
+- **差分隐私**:在梯度更新中加入Laplace噪声,保护用户隐私同时解决冷启动
+
+---
+
+## 四、关键论文与资源
+
+1. **LinUCB**:Li et al., "A Contextual-Bandit Approach to Personalized News Article Recommendation" (2010)
+2. **MeLU (元学习)**:Lee et al., "MeLU: Meta-Learned User Preference Estimator for Cold-Start Recommendation" (WWW 2019)
+3. **EE-Net**:Chen et al., "EE-Net: Exploitation-Exploration Neural Networks in Contextual Bandits" (2021)
+4. **Thompson Sampling for Cold-start**:"Modeling implicit feedback based on bandit learning for recommendation" (Neurocomputing 2023)
+
+这些技术方案的核心优势在于:**不依赖大规模历史数据**,通过概率建模、在线学习、跨域知识迁移等方式,在数据稀缺的独立站场景下实现快速收敛和个性化。
\ No newline at end of file
diff --git a/docs/recommendation/冷启动和数据稀缺场景下推荐系统2.md b/docs/recommendation/冷启动和数据稀缺场景下推荐系统2.md
new file mode 100644
index 0000000..02efc8a
--- /dev/null
+++ b/docs/recommendation/冷启动和数据稀缺场景下推荐系统2.md
@@ -0,0 +1,153 @@
+# 独立站推荐系统在数据稀缺和冷启动场景下的先进技术调研报告
+
+## 摘要
+
+独立站(Direct-to-Consumer, DTC)电商平台在构建个性化推荐系统时,普遍面临数据稀缺和冷启动的严峻挑战。与拥有海量用户行为和商品数据的综合电商平台不同,独立站的流量规模较小,用户行为数据稀疏,且商品更新迭代速度快,导致传统的基于大规模监督学习的推荐算法难以有效应用。本报告旨在深入探讨贝叶斯学习、贝叶斯A/B实验、LinUCB以及探索-利用(Exploration-Exploitation, EE)等先进技术如何解决这些问题,并总结领先SaaS服务商在这些领域的实践经验。
+
+## 1. 引言:独立站推荐系统的挑战
+
+随着DTC模式的兴起,越来越多的品牌选择建立自己的独立站以直接触达消费者。然而,独立站的推荐系统在起步阶段和日常运营中,常常遭遇以下核心挑战:
+
+* **数据稀缺(Data Scarcity)**:新用户、新商品或长尾商品缺乏足够的交互数据,导致推荐模型无法准确学习其偏好或特征。
+* **冷启动(Cold Start)**:当系统缺乏足够的用户历史行为或商品信息时,难以生成有效的个性化推荐,从而影响用户体验和转化率。
+
+这些挑战使得依赖大量数据进行训练的深度学习模型在独立站场景下效果不佳,甚至可能因过拟合而产生次优推荐。因此,需要采用更适合小样本、高不确定性环境的算法和策略。
+
+## 2. 先进技术解决方案
+
+### 2.1 贝叶斯学习 (Bayesian Learning)
+
+贝叶斯学习提供了一种在不确定性下进行推理和决策的强大框架。它将模型参数视为随机变量,并通过结合先验知识(Prior)和观测数据(Evidence)来更新对这些参数的信念,形成后验分布(Posterior)。
+
+**核心原理:**
+
+贝叶斯定理:`P(θ|D) = P(D|θ) * P(θ) / P(D)`
+
+其中:
+* `P(θ|D)` 是后验概率,表示在给定数据D的情况下参数θ的概率。
+* `P(D|θ)` 是似然函数,表示在给定参数θ的情况下数据D的概率。
+* `P(θ)` 是先验概率,表示在观测数据D之前参数θ的信念。
+* `P(D)` 是边缘似然,作为归一化常数。
+
+**优势:**
+
+* **不确定性建模**:贝叶斯方法能够自然地量化模型参数的不确定性。在数据稀少时,后验分布会更宽,明确地反映出模型对参数估计的不确定性,这有助于在推荐决策中考虑风险 [1]。
+* **引入先验知识**:可以通过先验分布融入领域专家知识、历史数据或来自其他平台的通用模式,有效缓解冷启动问题。例如,对于新商品,可以利用其品类、品牌等元数据构建先验,而非从零开始学习 [2]。
+
+**应用案例:**
+
+* **Amazon**:在产品搜索中利用经验贝叶斯(Empirical Bayes)方法处理冷启动问题,通过聚合相似商品的统计数据来为新商品提供初始的质量估计 [3]。
+* **BBC**:数据科学团队开发了基于贝叶斯方法的冷启动推荐原型,即使只有少量交互数据也能生成高性能的推荐 [4]。
+
+### 2.2 贝叶斯 A/B 实验 (Bayesian A/B Testing)
+
+A/B测试是评估推荐系统效果的关键工具。传统的频率派A/B测试依赖于P值和固定样本量,在数据稀缺或需要快速迭代的独立站环境中存在局限性。贝叶斯A/B测试则提供了更灵活和直观的决策方式。
+
+**频率派与贝叶斯派对比:**
+
+| 特性 | 频率派 A/B 测试 | 贝叶斯 A/B 测试 |
+| :----------- | :----------------------------------- | :----------------------------------- |
+| **核心问题** | 假设零假设(无差异),计算观测结果发生的概率(P值) | 计算各版本优于基线的概率,以及各版本之间优劣的概率 |
+| **数据窥视** | 不允许(会导致P值失效) | 允许(可随时查看结果并更新信念) |
+| **决策依据** | P值是否小于显著性水平(如0.05) | 后验概率(如B优于A的概率 > 95%)和预期增益区间 |
+| **结果呈现** | 仅能判断是否存在统计显著差异 | 提供预期增益的概率分布和置信区间 |
+| **样本量** | 需要预先确定,达到后才能停止 | 可根据置信度动态停止(Sequential Testing) |
+
+**优势:**
+
+* **灵活的停止规则**:允许在实验过程中随时查看数据并做出决策,无需等待预设样本量,这对于独立站快速迭代和资源有限的场景至关重要 [5]。
+* **直观的商业决策**:直接给出“版本B优于版本A的概率”以及“预期增益”的概率分布,使得业务人员能更清晰地理解实验结果,并权衡切换成本与潜在收益 [6]。
+* **更好地处理小样本**:通过引入先验,贝叶斯方法在小样本情况下也能给出相对稳健的估计。
+
+**SaaS 应用:**
+
+* **AB Tasty** 等主流实验优化平台已采用贝叶斯统计方法,以提供更灵活、更具商业洞察力的A/B测试结果 [5]。
+
+### 2.3 LinUCB 与 EE 探索利用 (Bandit 算法)
+
+多臂老虎机(Multi-Armed Bandit, MAB)算法,特别是其上下文感知版本(Contextual Bandit),是解决推荐系统中探索与利用(Exploration-Exploitation, EE)两难困境的有效工具。在数据稀缺和冷启动场景下,MAB算法能够动态地平衡探索新商品以获取更多信息和利用已知最优商品以最大化收益。
+
+**探索与利用 (Exploration-Exploitation, EE):**
+
+* **探索 (Exploration)**:尝试推荐新商品或不确定性高的商品,以发现潜在的用户兴趣或商品价值。
+* **利用 (Exploitation)**:推荐已知能带来高收益的商品,以最大化短期回报。
+
+**LinUCB (Linear Upper Confidence Bound):**
+
+LinUCB是一种经典的上下文MAB算法,它假设每个“臂”(即推荐商品)的奖励(如点击率、购买率)是其特征的线性函数。它通过维护每个臂的线性模型参数,并利用置信区间上界(Upper Confidence Bound)来指导选择。
+
+**核心原理:**
+
+LinUCB在每次推荐时,会为每个候选商品计算一个分数,该分数由两部分组成:
+
+1. **预测奖励**:基于当前线性模型对该商品特征的预测。
+2. **探索奖励**:基于该商品参数估计的不确定性(通常是置信区间宽度),不确定性越高,探索奖励越大。
+
+选择分最高的商品进行推荐。随着商品被探索,其参数估计的不确定性会降低,探索奖励也会随之减少,从而实现从探索到利用的平滑过渡 [7]。
+
+**优势:**
+
+* **实时适应性**:能够根据用户的实时反馈(点击、购买)快速更新模型,适应用户偏好和商品热度的动态变化。
+* **冷启动友好**:新商品由于缺乏交互数据,其参数估计的不确定性高,因此会获得较高的探索奖励,从而有机会被推荐给用户,有效缓解商品冷启动 [8]。
+* **上下文感知**:可以整合用户特征、商品特征、场景特征等上下文信息,生成更个性化的推荐。
+
+**应用案例:**
+
+* **Yahoo!**:在新闻推荐中广泛使用LinUCB,通过结合新闻文章的特征和用户上下文,实现个性化新闻分发 [9]。
+* **阿里巴巴**:在商品推荐中采用LinUCB,并结合用户浏览模型来处理位置偏差,优化移动端推荐效果 [10]。
+
+### 2.4 Thompson Sampling (汤普森采样)
+
+Thompson Sampling是另一种流行的贝叶斯MAB算法,它通过从每个臂的后验奖励分布中进行采样来选择臂。它在理论上具有较好的性能,并且在实践中表现出色。
+
+**核心原理:**
+
+对于每个候选商品,Thompson Sampling会维护一个关于其奖励的后验概率分布(例如,对于二元奖励,可以使用Beta分布)。在每次推荐时,它会从每个商品的后验分布中随机抽取一个样本值,然后选择样本值最高的商品进行推荐 [11]。
+
+**优势:**
+
+* **自然地平衡探索与利用**:采样过程本身就体现了探索与利用的平衡。不确定性高的商品(后验分布较宽)有更大的机会被采样到高值,从而获得探索机会;而确定性高的商品(后验分布较窄)则倾向于被采样到接近其真实均值的值,从而被利用。
+* **理论性能优越**:在许多场景下,Thompson Sampling被证明具有接近最优的遗憾(Regret)性能。
+* **易于实现**:对于某些简单的奖励分布(如伯努利奖励),其实现相对直观。
+
+**应用案例:**
+
+* **Doordash**:利用Thompson Sampling进行菜系推荐,根据用户对不同菜系的订单历史构建Beta分布,从而个性化展示菜系过滤器 [12]。
+* **Amazon**:在优化页面布局时采用多元Thompson Sampling,通过对模型权重进行采样来探索不同的布局组合 [13]。
+
+## 3. 领先SaaS厂商的实践
+
+许多为独立站提供推荐服务的SaaS厂商,已经将上述先进技术融入其产品中,以帮助客户应对数据挑战:
+
+* **Algolia**:作为搜索和推荐SaaS,Algolia强调利用预训练AI算法和合成数据来解决冷启动问题,并提供实时推理能力,确保推荐的及时性和相关性 [14]。
+* **Nosto**:专注于电商个性化,其推荐引擎利用Bandit算法进行实时多变量优化,以平衡新商品的曝光和个性化推荐的准确性。
+* **Spotify / Netflix**:虽然不是纯粹的独立站SaaS,但这些内容平台在处理新内容分发、UI布局优化和封面图A/B测试时,大量采用了Bandit算法和贝叶斯方法,其经验对独立站具有借鉴意义 [15] [16]。
+
+## 4. 总结与展望
+
+独立站推荐系统在数据稀缺和冷启动场景下,传统的监督学习方法面临巨大挑战。贝叶斯学习、贝叶斯A/B实验、LinUCB和Thompson Sampling等先进技术,通过有效建模不确定性、引入先验知识以及动态平衡探索与利用,为独立站提供了强大的解决方案。
+
+* **贝叶斯学习**:通过后验分布量化不确定性,并利用先验知识缓解数据稀疏。
+* **贝叶斯A/B实验**:提供更灵活、更具商业洞察力的实验评估方式,加速产品迭代。
+* **LinUCB和Thompson Sampling**:作为上下文MAB算法,能够实时适应用户偏好,并有效解决新商品和新用户的冷启动问题。
+
+未来,随着独立站生态的不断发展,这些技术将继续演进,并可能与更复杂的深度学习模型相结合,形成混合推荐系统,以在数据充足时发挥深度学习的强大拟合能力,在数据稀缺时则依赖贝叶斯和Bandit算法的鲁棒性。同时,可解释性(Explainability)和公平性(Fairness)也将成为独立站推荐系统发展的重要方向。
+
+## 参考文献
+
+[1] BayesCNS: A Unified Bayesian Approach to Address Cold Start and Non-Stationarity in Search Systems at Scale. [https://arxiv.org/html/2410.02126v1](https://arxiv.org/html/2410.02126v1)
+[2] Bayesian cold-start recommender - by Matt Crooks. [https://medium.com/bbc-data-science/bayesian-cold-start-recommender-810c1cf9a5d6](https://medium.com/bbc-data-science/bayesian-cold-start-recommender-810c1cf9a5d6)
+[3] Addressing cold start in product search via empirical bayes. [https://www.amazon.science/publications/addressing-cold-start-in-product-search-via-empirical-bayes](https://www.amazon.science/publications/addressing-cold-start-in-product-search-via-empirical-bayes)
+[4] BayesCNS: A Unified Bayesian Approach to Address Cold Start and Non-Stationarity in Search Systems at Scale. [https://ojs.aaai.org/index.php/AAAI/article/download/31975/34130](https://ojs.aaai.org/index.php/AAAI/article/download/31975/34130)
+[5] Frequentist vs Bayesian Methods in A/B Testing - Which is Better? [https://www.abtasty.com/blog/bayesian-ab-testing/](https://www.abtasty.com/blog/bayesian-ab-testing/)
+[6] A comparative study of frequentist vs Bayesian A/B testing in the detection of E-commerce fraud. [https://www.emerald.com/jebde/article/1/1-2/3/225916/A-comparative-study-of-frequentist-vs-Bayesian-A-B](https://www.emerald.com/jebde/article/1/1-2/3/225916/A-comparative-study-of-frequentist-vs-Bayesian-A-B)
+[7] Recommender systems using LinUCB: A contextual multi-armed bandit approach. [https://medium.com/data-science/recommender-systems-using-linucb-a-contextual-multi-armed-bandit-approach-35a6f0eb6c4](https://medium.com/data-science/recommender-systems-using-linucb-a-contextual-multi-armed-bandit-approach-35a6f0eb6c4)
+[8] Bandits for Recommender Systems. [https://eugeneyan.com/writing/bandits/](https://eugeneyan.com/writing/bandits/)
+[9] The classic example of UCB is Yahoo’s LinUCB for news recommendations. [https://eugeneyan.com/writing/bandits/](https://eugeneyan.com/writing/bandits/)
+[10] Alibaba’s LinUCB for item recommendations. [https://eugeneyan.com/writing/bandits/](https://eugeneyan.com/writing/bandits/)
+[11] Now, why should we care about Recommendation Systems? ft. a soft introduction to Thompson Sampling. [https://towardsdatascience.com/now-why-should-we-care-about-recommendation-systems-ft-a-soft-introduction-to-thompson-sampling-b9483b43f262/](https://towardsdatascience.com/now-why-should-we-care-about-recommendation-systems-ft-a-soft-introduction-to-thompson-sampling-b9483b43f262/)
+[12] Doordash’s bandits for cuisine recommendations. [https://eugeneyan.com/writing/bandits/](https://eugeneyan.com/writing/bandits/)
+[13] Amazon’s multivariate bandits to optimize page layouts. [https://eugeneyan.com/writing/bandits/](https://eugeneyan.com/writing/bandits/)
+[14] Solving the cold start problem with synthetic data. [https://www.algolia.com/blog/ai/using-pre-trained-ai-algorithms-to-solve-the-cold-start-problem](https://www.algolia.com/blog/ai/using-pre-trained-ai-algorithms-to-solve-the-cold-start-problem)
+[15] Bandits for Recommender Systems. [https://eugeneyan.com/writing/bandits/](https://eugeneyan.com/writing/bandits/)
+[16] Artwork Personalization at Netflix. [https://eugeneyan.com/writing/bandits/](https://eugeneyan.com/writing/bandits/)
diff --git a/docs/搜索API对接指南.md b/docs/搜索API对接指南.md
index 3e11f18..8a43a79 100644
--- a/docs/搜索API对接指南.md
+++ b/docs/搜索API对接指南.md
@@ -64,7 +64,7 @@
### 1.1 基础信息
-- **Base URL**: `http://your-domain:6002` 或 `http://120.76.41.98:6002`
+- **Base URL**: `http://120.76.41.98:6002`
- **协议**: HTTP/HTTPS
- **数据格式**: JSON
- **字符编码**: UTF-8
@@ -715,8 +715,7 @@ curl "http://localhost:6002/search/12345"
"translations": {
"en": "barbie doll"
},
- "domain": "default",
- "has_vector": true
+ "domain": "default"
},
"suggestions": [],
"related_searches": [],
@@ -739,7 +738,7 @@ curl "http://localhost:6002/search/12345"
| `total` | integer | 匹配的总文档数 |
| `max_score` | float | 最高相关性分数 |
| `facets` | array | 分面统计结果 |
-| `query_info` | object | 查询处理信息,见 [4.2.1 query_info 说明](#421-query_info-说明) |
+| `query_info` | object | 查询处理信息 |
| `took_ms` | integer | 搜索耗时(毫秒) |
#### 4.2.1 query_info 说明
@@ -754,7 +753,6 @@ curl "http://localhost:6002/search/12345"
| `detected_language` | string | 检测到的查询语言(如 `zh`、`en`) |
| `translations` | object | 翻译结果,键为语言代码,值为翻译文本 |
| `domain` | string | 查询域(如 `default`、`title`、`brand` 等) |
-| `has_vector` | boolean | 是否生成了查询向量(用于语义检索) |
### 4.3 SpuResult字段说明
diff --git a/frontend/static/js/app.js b/frontend/static/js/app.js
index 7ef39f2..995f64b 100644
--- a/frontend/static/js/app.js
+++ b/frontend/static/js/app.js
@@ -767,7 +767,6 @@ function displayDebugInfo(data) {
html += `
boolean_ast: ${escapeHtml(debugInfo.query_analysis.boolean_ast)}
`;
}
- html += `has_vector: ${debugInfo.query_analysis.has_vector ? 'enabled' : 'disabled'}
`;
html += '';
}
diff --git a/query/query_parser.py b/query/query_parser.py
index b4f32e9..992c3c5 100644
--- a/query/query_parser.py
+++ b/query/query_parser.py
@@ -313,8 +313,7 @@ class QueryParser:
should_generate_embedding = (
generate_vector and
self.config.query_config.enable_text_embedding and
- domain == "default" and
- not is_short_query
+ domain == "default"
)
encoding_executor = None
diff --git a/query/translator.py b/query/translator.py
index 117b234..b452eef 100644
--- a/query/translator.py
+++ b/query/translator.py
@@ -45,6 +45,7 @@ from concurrent.futures import ThreadPoolExecutor, Future
from datetime import timedelta
from typing import Dict, List, Optional, Union
import logging
+import time
logger = logging.getLogger(__name__)
@@ -349,6 +350,7 @@ class Translator:
}
]
+ start_time = time.time()
try:
completion = self.qwen_client.chat.completions.create(
model=self.QWEN_MODEL,
@@ -359,17 +361,19 @@ class Translator:
)
translated_text = completion.choices[0].message.content.strip()
+ duration_ms = (time.time() - start_time) * 1000
- logger.debug(
+ logger.info(
f"[Translator] Qwen API response success | Original text: '{text}' | Target language: {target_lang_qwen} | "
- f"Translation result: '{translated_text}'"
+ f"Translation result: '{translated_text}' | Duration: {duration_ms:.2f} ms"
)
return translated_text
except Exception as e:
+ duration_ms = (time.time() - start_time) * 1000
logger.error(
f"[Translator] Qwen API request exception | Original text: '{text}' | Target language: {target_lang_qwen} | "
- f"Error: {e}", exc_info=True
+ f"Duration: {duration_ms:.2f} ms | Error: {e}", exc_info=True
)
return None
diff --git a/reranker/README.md b/reranker/README.md
new file mode 100644
index 0000000..268534a
--- /dev/null
+++ b/reranker/README.md
@@ -0,0 +1,87 @@
+# Reranker Service (BGE v2 m3)
+
+A minimal, production-ready reranker service based on **BAAI/bge-reranker-v2-m3**.
+
+Features
+- FP16 on GPU
+- Length-based sorting to reduce padding waste
+- Deduplication to avoid redundant inference
+- Scores returned in original input order
+- Simple FastAPI service
+
+## Files
+- `reranker/bge_reranker.py`: core model loading + scoring logic
+- `reranker/server.py`: FastAPI service with `/health` and `/rerank`
+- `reranker/config.py`: simple configuration
+
+## Requirements
+Install Python deps (already in project requirements):
+- `torch`
+- `modelscope`
+- `fastapi`
+- `uvicorn`
+
+## Configuration
+Edit `reranker/config.py`:
+- `MODEL_NAME`: default `BAAI/bge-reranker-v2-m3`
+- `DEVICE`: `None` (auto), `cuda`, or `cpu`
+- `USE_FP16`: enable fp16 on GPU
+- `BATCH_SIZE`: default 64
+- `MAX_LENGTH`: default 512
+- `PORT`: default 6007
+- `MAX_DOCS`: request limit (default 1000)
+
+## Run the Service
+```bash
+uvicorn reranker.server:app --host 0.0.0.0 --port 6007
+```
+
+## API
+### Health
+```
+GET /health
+```
+
+### Rerank
+```
+POST /rerank
+Content-Type: application/json
+
+{
+ "query": "wireless mouse",
+ "docs": ["logitech mx master", "usb cable", "wireless mouse bluetooth"]
+}
+```
+
+Response:
+```
+{
+ "scores": [0.93, 0.02, 0.88],
+ "meta": {
+ "input_docs": 3,
+ "usable_docs": 3,
+ "unique_docs": 3,
+ "dedup_ratio": 0.0,
+ "elapsed_ms": 12.4,
+ "model": "BAAI/bge-reranker-v2-m3",
+ "device": "cuda",
+ "fp16": true,
+ "batch_size": 64,
+ "max_length": 512,
+ "normalize": true,
+ "service_elapsed_ms": 13.1
+ }
+}
+```
+
+## Logging
+The service uses standard Python logging. For structured logs and full output,
+run uvicorn with:
+```bash
+uvicorn reranker.server:app --host 0.0.0.0 --port 6007 --log-level info
+```
+
+## Notes
+- No caching is used by design.
+- Inputs are deduplicated by exact string match.
+- Empty or null docs are skipped and scored as 0.
diff --git a/reranker/bge_reranker.py b/reranker/bge_reranker.py
new file mode 100644
index 0000000..5385463
--- /dev/null
+++ b/reranker/bge_reranker.py
@@ -0,0 +1,256 @@
+"""
+Minimal BGE reranker for pairwise scoring (query, doc).
+
+Features:
+- Model loading with optional FP16
+- Length-based sorting to reduce padding waste
+- Deduplication to avoid redundant inference
+- Scores returned in original doc order
+"""
+
+import logging
+import math
+import threading
+import time
+from typing import Any, Dict, List, Optional, Tuple
+
+import torch
+from modelscope import AutoModelForSequenceClassification, AutoTokenizer
+
+logger = logging.getLogger("reranker.core")
+
+
+class BGEReranker:  # Cross-encoder scorer for (query, doc) pairs using BAAI/bge-reranker-v2-m3.
+    def __init__(
+        self,
+        model_name: str = "BAAI/bge-reranker-v2-m3",
+        device: Optional[str] = None,
+        batch_size: int = 64,
+        use_fp16: bool = True,
+        max_length: int = 512,
+        cache_dir: str = "./model_cache",
+        enable_warmup: bool = True,
+    ) -> None:
+        self.model_name = model_name
+        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")  # auto-pick GPU when available
+        self.batch_size = max(1, int(batch_size))  # clamp to at least 1
+        self.max_length = int(max_length)
+        self.use_fp16 = bool(use_fp16 and self.device == "cuda")  # fp16 only honored on CUDA
+        self._lock = threading.Lock()  # serializes inference across concurrent callers
+
+        logger.info(
+            "[BGE_RERANKER] Loading model %s on %s (fp16=%s)",
+            self.model_name,
+            self.device,
+            self.use_fp16,
+        )
+
+        self.tokenizer = AutoTokenizer.from_pretrained(
+            self.model_name, trust_remote_code=True, cache_dir=cache_dir
+        )
+        self.model = AutoModelForSequenceClassification.from_pretrained(
+            self.model_name, trust_remote_code=True, cache_dir=cache_dir
+        )
+
+        self.model = self.model.to(self.device)
+        self.model.eval()  # inference only; disables dropout etc.
+
+        if self.use_fp16:
+            self.model = self.model.half()
+
+        if self.device == "cuda":
+            torch.backends.cudnn.benchmark = True  # let cuDNN autotune kernels for recurring shapes
+
+        if enable_warmup:
+            self._warmup()
+
+        logger.info(
+            "[BGE_RERANKER] Model ready | model=%s device=%s fp16=%s batch=%s max_len=%s",
+            self.model_name,
+            self.device,
+            self.use_fp16,
+            self.batch_size,
+            self.max_length,
+        )
+
+    def _warmup(self) -> None:  # one tiny forward pass to pay tokenizer/CUDA init cost upfront
+        try:
+            with torch.inference_mode():
+                pairs = [["warmup", "warmup"]]
+                inputs = self.tokenizer(
+                    pairs,
+                    padding=True,
+                    truncation=True,
+                    return_tensors="pt",
+                    max_length=self.max_length,
+                )
+                inputs = {k: v.to(self.device) for k, v in inputs.items()}
+                if self.use_fp16:
+                    inputs = {
+                        k: (v.half() if v.dtype == torch.float32 else v)
+                        for k, v in inputs.items()
+                    }
+                _ = self.model(**inputs, return_dict=True).logits
+                if self.device == "cuda":
+                    torch.cuda.synchronize()  # make warmup cost visible before serving
+        except Exception as exc:
+            logger.warning("[BGE_RERANKER] Warmup failed: %s", exc)  # warmup is best-effort; never abort startup
+
+    def score(self, query: str, docs: List[str], normalize: bool = True) -> List[float]:  # convenience wrapper: scores only, meta discarded
+        scores, _meta = self.score_with_meta(query, docs, normalize=normalize)
+        return scores
+
+    def score_with_meta(
+        self, query: str, docs: List[str], normalize: bool = True
+    ) -> Tuple[List[float], Dict[str, Any]]:  # returns scores aligned to input order, plus timing/dedup metadata
+        start_ts = time.time()
+
+        if docs is None:
+            docs = []
+
+        query = "" if query is None else str(query).strip()
+        total_docs = len(docs)
+        output_scores: List[float] = [0.0] * total_docs  # empty/None docs keep the default 0.0
+
+        indexed_docs: List[Tuple[int, str]] = []
+        for i, doc in enumerate(docs):
+            if doc is None:
+                continue
+            text = str(doc).strip()
+            if not text:
+                continue
+            indexed_docs.append((i, text))  # remember original position for result re-alignment
+
+        if not query or not indexed_docs:
+            elapsed_ms = (time.time() - start_ts) * 1000.0
+            return output_scores, {
+                "input_docs": total_docs,
+                "usable_docs": len(indexed_docs),
+                "unique_docs": 0,
+                "dedup_ratio": 0.0,
+                "elapsed_ms": round(elapsed_ms, 3),
+            }
+
+        # Sort by estimated length + text to cluster similar lengths
+        indexed_docs.sort(key=lambda x: (len(x[1]), x[1]))
+
+        unique_texts: List[str] = []
+        position_to_unique: List[int] = []
+        prev_text: Optional[str] = None
+
+        for _idx, text in indexed_docs:  # sorting made duplicates adjacent, so exact-match dedup is a single pass
+            if text != prev_text:
+                unique_texts.append(text)
+                prev_text = text
+            position_to_unique.append(len(unique_texts) - 1)  # map every sorted doc to its unique text's index
+
+        logger.debug(
+            "[BGE_RERANKER] Preprocess | input=%d usable=%d unique=%d",
+            total_docs,
+            len(indexed_docs),
+            len(unique_texts),
+        )
+
+        unique_scores = self._score_unique(
+            query=query, passages=unique_texts, normalize=normalize
+        )
+
+        for (orig_idx, _text), unique_idx in zip(indexed_docs, position_to_unique):
+            output_scores[orig_idx] = float(unique_scores[unique_idx])  # scatter back to the caller's order
+
+        elapsed_ms = (time.time() - start_ts) * 1000.0
+        dedup_ratio = 0.0
+        if indexed_docs:
+            dedup_ratio = 1.0 - (len(unique_texts) / float(len(indexed_docs)))  # fraction of docs skipped by dedup
+
+        meta = {
+            "input_docs": total_docs,
+            "usable_docs": len(indexed_docs),
+            "unique_docs": len(unique_texts),
+            "dedup_ratio": round(dedup_ratio, 4),
+            "elapsed_ms": round(elapsed_ms, 3),
+            "model": self.model_name,
+            "device": self.device,
+            "fp16": self.use_fp16,
+            "batch_size": self.batch_size,
+            "max_length": self.max_length,
+            "normalize": normalize,
+        }
+
+        logger.info(
+            "[BGE_RERANKER] Done | input=%d usable=%d unique=%d dedup=%s elapsed_ms=%s",
+            meta["input_docs"],
+            meta["usable_docs"],
+            meta["unique_docs"],
+            meta["dedup_ratio"],
+            meta["elapsed_ms"],
+        )
+
+        return output_scores, meta
+
+    def _compute_optimal_batch_size(self, total: int) -> int:  # shrink the batch size while keeping the batch COUNT unchanged, to reduce padding waste
+        if total <= 0:
+            return 1
+        current_batch_size = self.batch_size + 8  # NOTE(review): starts 8 above the configured batch_size — confirm this headroom is intended
+        current_batch_count = math.ceil(total / current_batch_size)
+
+        optimal_batch_size = current_batch_size
+        test_batch_size = current_batch_size - 4
+
+        while test_batch_size > 0:  # step down by 4 until a smaller size would need more batches
+            test_batch_count = math.ceil(total / test_batch_size)
+            if test_batch_count <= current_batch_count:
+                optimal_batch_size = test_batch_size
+                test_batch_size -= 4
+            else:
+                break
+
+        return max(1, optimal_batch_size)
+
+    def _score_unique(
+        self, query: str, passages: List[str], normalize: bool = True
+    ) -> List[float]:  # batched forward passes over already-deduplicated passages
+        if not passages:
+            return []
+
+        optimal_batch_size = self._compute_optimal_batch_size(len(passages))
+
+        logger.info(
+            "[BGE_RERANKER] Reranking %d unique passages | batch=%d | device=%s | fp16=%s",
+            len(passages),
+            optimal_batch_size,
+            self.device,
+            self.use_fp16,
+        )
+
+        scores: List[float] = []
+
+        with self._lock:  # one inference stream at a time; protects shared model/GPU state
+            for i in range(0, len(passages), optimal_batch_size):
+                batch_passages = passages[i : i + optimal_batch_size]
+                pairs = [[query, passage] for passage in batch_passages]
+
+                with torch.inference_mode():
+                    inputs = self.tokenizer(
+                        pairs,
+                        padding=True,
+                        truncation=True,
+                        return_tensors="pt",
+                        max_length=self.max_length,
+                        add_special_tokens=True,
+                    )
+                    inputs = {k: v.to(self.device) for k, v in inputs.items()}
+
+                    if self.use_fp16:
+                        inputs = {
+                            k: (v.half() if v.dtype == torch.float32 else v)
+                            for k, v in inputs.items()
+                        }
+
+                    logits = self.model(**inputs, return_dict=True).logits.view(-1).float()  # cast back to fp32 before sigmoid/CPU copy
+                    if normalize:
+                        logits = torch.sigmoid(logits)  # map raw logits into [0, 1]
+                    batch_scores = logits.detach().cpu().numpy().tolist()
+                    scores.extend(batch_scores)
+
+        return scores
diff --git a/reranker/config.py b/reranker/config.py
new file mode 100644
index 0000000..43b569d
--- /dev/null
+++ b/reranker/config.py
@@ -0,0 +1,25 @@
+"""Reranker service configuration (simple Python config)."""
+
+
+class RerankerConfig(object):  # Static, code-level settings for the reranker service (no env overrides here).
+    # Server
+    HOST = "0.0.0.0"
+    PORT = 6007
+
+    # Model
+    MODEL_NAME = "BAAI/bge-reranker-v2-m3"
+    DEVICE = None  # None -> auto (cuda if available)
+    USE_FP16 = True
+    BATCH_SIZE = 64
+    MAX_LENGTH = 512
+    CACHE_DIR = "./model_cache"
+    ENABLE_WARMUP = True
+
+    # Request limits
+    MAX_DOCS = 1000
+
+    # Output
+    NORMALIZE = True
+
+
+CONFIG = RerankerConfig()  # module-level singleton imported by server.py and the model code
diff --git a/reranker/server.py b/reranker/server.py
new file mode 100644
index 0000000..f792996
--- /dev/null
+++ b/reranker/server.py
@@ -0,0 +1,138 @@
+"""
+FastAPI service for BGE reranking.
+
+POST /rerank
+Request:
+{
+ "query": "...",
+ "docs": ["doc1", "doc2", ...]
+}
+
+Response:
+{
+ "scores": [0.98, 0.12, ...],
+ "meta": {...}
+}
+"""
+
+import logging
+import time
+from typing import Any, Dict, List, Optional
+
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel, Field
+
+from reranker.bge_reranker import BGEReranker
+from reranker.config import CONFIG
+
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s %(levelname)s %(name)s | %(message)s",
+)
+logger = logging.getLogger("reranker.service")
+
+app = FastAPI(title="SearchEngine Reranker Service", version="1.0.0")  # ASGI app served by uvicorn
+
+_reranker: Optional[BGEReranker] = None  # populated by the startup hook; None until the model is loaded
+
+
+class RerankRequest(BaseModel):  # Request schema for POST /rerank.
+    query: str = Field(..., description="Search query")
+    docs: List[str] = Field(..., description="Documents/passages to rerank")
+    normalize: Optional[bool] = Field(
+        default=CONFIG.NORMALIZE, description="Apply sigmoid normalization"
+    )
+
+
+class RerankResponse(BaseModel):  # Response schema for POST /rerank.
+    scores: List[float] = Field(..., description="Scores aligned to input docs order")
+    meta: Dict[str, Any] = Field(default_factory=dict)  # timing/dedup/model metadata from the reranker
+
+
+@app.on_event("startup")  # NOTE(review): on_event is deprecated in newer FastAPI — consider a lifespan handler
+def load_model() -> None:  # Eagerly load the model at process startup so the first request is not slow.
+    global _reranker
+    logger.info("Starting reranker service on port %s", CONFIG.PORT)
+    try:
+        _reranker = BGEReranker(
+            model_name=CONFIG.MODEL_NAME,
+            device=CONFIG.DEVICE,
+            batch_size=CONFIG.BATCH_SIZE,
+            use_fp16=CONFIG.USE_FP16,
+            max_length=CONFIG.MAX_LENGTH,
+            cache_dir=CONFIG.CACHE_DIR,
+            enable_warmup=CONFIG.ENABLE_WARMUP,
+        )
+        logger.info(
+            "Reranker ready | model=%s device=%s fp16=%s batch=%s max_len=%s",
+            CONFIG.MODEL_NAME,
+            _reranker.device,
+            _reranker.use_fp16,
+            _reranker.batch_size,
+            _reranker.max_length,
+        )
+    except Exception as exc:
+        logger.error("Failed to initialize reranker: %s", exc, exc_info=True)
+        raise  # re-raise so service startup fails loudly instead of serving 503s silently
+
+
+@app.get("/health")
+def health() -> Dict[str, Any]:  # Liveness/readiness probe; "ok" only once the model is loaded.
+    return {
+        "status": "ok" if _reranker is not None else "unavailable",
+        "model_loaded": _reranker is not None,
+        "model": CONFIG.MODEL_NAME,
+        "device": CONFIG.DEVICE,  # configured value (may be None/auto), not the resolved runtime device
+    }
+
+
+@app.post("/rerank", response_model=RerankResponse)
+def rerank(request: RerankRequest) -> RerankResponse:  # Score request.docs against request.query; scores keep input order.
+    if _reranker is None:
+        raise HTTPException(status_code=503, detail="Reranker model not loaded")  # startup hook has not completed (or failed)
+
+    query = (request.query or "").strip()
+    if not query:
+        raise HTTPException(status_code=400, detail="query cannot be empty")
+
+    if request.docs is None or len(request.docs) == 0:
+        raise HTTPException(status_code=400, detail="docs cannot be empty")
+
+    if len(request.docs) > CONFIG.MAX_DOCS:
+        raise HTTPException(
+            status_code=400,
+            detail=f"Too many docs: {len(request.docs)} > {CONFIG.MAX_DOCS}",
+        )
+
+    normalize = CONFIG.NORMALIZE if request.normalize is None else bool(request.normalize)  # explicit request value wins over config default
+
+    start_ts = time.time()
+    logger.info(
+        "Rerank request | docs=%d normalize=%s",
+        len(request.docs),
+        normalize,
+    )
+    scores, meta = _reranker.score_with_meta(query, request.docs, normalize=normalize)
+    meta = dict(meta)  # copy so the model's meta dict is not mutated
+    meta.update({"service_elapsed_ms": round((time.time() - start_ts) * 1000.0, 3)})  # total service-side time, incl. validation
+    logger.info(
+        "Rerank done | docs=%d unique=%s dedup=%s elapsed_ms=%s",
+        meta.get("input_docs"),
+        meta.get("unique_docs"),
+        meta.get("dedup_ratio"),
+        meta.get("service_elapsed_ms"),
+    )
+
+    return RerankResponse(scores=scores, meta=meta)
+
+
+if __name__ == "__main__":  # direct-run entry point mirroring the uvicorn command in the README
+    import uvicorn
+
+    uvicorn.run(
+        "reranker.server:app",
+        host=CONFIG.HOST,
+        port=CONFIG.PORT,
+        reload=False,
+        log_level="info",
+    )
--
libgit2 0.21.2