diff --git a/app.py b/app.py
index 0640246..99bc344 100644
--- a/app.py
+++ b/app.py
@@ -321,7 +321,7 @@ def display_product_card_from_item(product: ProductItem) -> None:
if product.price is not None:
st.caption(f"¥{product.price:.2f}")
- label_style = "⭐" if product.match_label == "Highly Relevant" else "✦"
+ label_style = "⭐" if product.match_label == "Relevant" else "✦"
st.caption(f"{label_style} {product.match_label}")
@@ -331,7 +331,7 @@ def render_search_result_block(result: SearchResult) -> None:
Shows:
- A styled header with query + match counts + quality_summary (if any)
- - A grid of product cards (Highly Relevant first, then Partially Relevant; max 6)
+ - A grid of product cards (Relevant first, then Partially Relevant; max 6)
"""
summary_line = f' · {result.quality_summary}' if result.quality_summary else ''
header_html = (
@@ -339,7 +339,7 @@ def render_search_result_block(result: SearchResult) -> None:
f'margin:8px 0 4px 0;background:#fafafa;">'
f''
f'🔍 {result.query}'
- f' · Highly Relevant {result.perfect_count} 件'
+ f' · Relevant {result.perfect_count} 件'
f' · Partially Relevant {result.partial_count} 件'
f'{summary_line}'
f''
@@ -347,7 +347,7 @@ def render_search_result_block(result: SearchResult) -> None:
st.markdown(header_html, unsafe_allow_html=True)
# Perfect matches first, fall back to partials if none
- perfect = [p for p in result.products if p.match_label == "Highly Relevant"]
+ perfect = [p for p in result.products if p.match_label == "Relevant"]
partial = [p for p in result.products if p.match_label == "Partially Relevant"]
to_show = (perfect + partial)[:6] if perfect else partial[:6]
@@ -361,14 +361,20 @@ def render_search_result_block(result: SearchResult) -> None:
display_product_card_from_item(product)
-def render_message_with_refs(content: str, session_id: str) -> None:
+def render_message_with_refs(
+ content: str,
+ session_id: str,
+ fallback_refs: Optional[dict] = None,
+) -> None:
"""
Render an assistant message that may contain [SEARCH_REF:xxx] tokens.
Text segments are rendered as markdown.
[SEARCH_REF:xxx] tokens are replaced with full product card blocks
- loaded from the global registry.
+ loaded from the global registry, or from fallback_refs (e.g. refs stored
+ with the message so they survive reruns / different workers).
"""
+ fallback_refs = fallback_refs or {}
# re.split with a capture group alternates: [text, ref_id, text, ref_id, ...]
parts = SEARCH_REF_PATTERN.split(content)
@@ -381,7 +387,7 @@ def render_message_with_refs(content: str, session_id: str) -> None:
else:
# ref_id segment
ref_id = segment.strip()
- result = global_registry.get(session_id, ref_id)
+ result = global_registry.get(session_id, ref_id) or fallback_refs.get(ref_id)
if result:
render_search_result_block(result)
else:
@@ -450,7 +456,9 @@ def display_message(message: dict):
# Render message: expand [SEARCH_REF:xxx] tokens into product card blocks
session_id = st.session_state.get("session_id", "")
- render_message_with_refs(content, session_id)
+ render_message_with_refs(
+ content, session_id, fallback_refs=message.get("search_refs")
+ )
st.markdown("", unsafe_allow_html=True)
@@ -671,13 +679,14 @@ def main():
tool_calls = result.get("tool_calls", [])
debug_steps = result.get("debug_steps", [])
- # Add assistant message
+ # Add assistant message (store search_refs so refs resolve after rerun)
st.session_state.messages.append(
{
"role": "assistant",
"content": response,
"tool_calls": tool_calls,
"debug_steps": debug_steps,
+ "search_refs": result.get("search_refs", {}),
}
)
diff --git a/app/agents/shopping_agent.py b/app/agents/shopping_agent.py
index 033e62c..674d1bc 100644
--- a/app/agents/shopping_agent.py
+++ b/app/agents/shopping_agent.py
@@ -10,6 +10,8 @@ Architecture:
import json
import logging
+import re
+from datetime import datetime
from pathlib import Path
from typing import Any, Optional, Sequence
@@ -33,7 +35,30 @@ logger = logging.getLogger(__name__)
# 1. Guides multi-query search planning with explicit evaluate-and-decide loop
# 2. Forbids re-listing product details in the final response
# 3. Mandates [SEARCH_REF:xxx] inline citation as the only product presentation mechanism
-SYSTEM_PROMPT = """ 角色定义
+SYSTEM_PROMPT = f"""角色定义
+你是我们店铺的一名专业的电商导购,是一个善于倾听、主动引导、懂得搭配的“时尚顾问”,通过有温度的对话,给用户提供有价值的信息,包括需求引导、方案推荐、搜索结果推荐,最终促成满意的购物决策或转化行为。
+作为我们店铺的一名专业的销售,除了本店铺的商品的推荐,你可以给用户提供有帮助的信息,但是不要虚构商品、提供本商店搜索结果以外的商品。
+
+一些原则:
+1. 价值提供与信息收集的原则:
+ 1. 优先价值提供:适时的提供有价值的信息,如商品推荐、穿搭建议、趋势信息,在推荐方向上有需求缺口、需要明确的重要信息时,要适时的做“信息收集”,引导式的澄清需求、提高商品发现的效率,形成“提供-反馈”的良性循环。
+ 2. 意图判断-缺口大(比如品类或者使用人群都不能确定):从“品类”、“场景”、“效果”等宽泛的意图切入,给出方案推荐 + 1-2个关键问题让用户选择;示例:
+ 1. 你想穿出哪种感觉?职场干练 松弛自在 活力元气 温柔知性
+ 2. 平时通勤场合多吗?还是更喜欢生活化穿搭?
+ 3. 意图判断-缺口小:直接检索+方案呈现,根据情况,可以考虑该方向下重要的决策因素(思考哪些维度最可能影响推荐结果),进行提议和问题收集,让用户既得到相关信息、又得到下一步的方向引导、同时也有机会修正或者细化诉求。
+ 4. 选项驱动式澄清:推荐几个清晰的方向,呈现方案或商品搜索结果,再做澄清
+ 5. 单轮对话最好只提一个问题,最多两个,禁止多问题堆叠。
+ 6. 站在用户立场思考:比如询问用户期待的效果或感觉、使用的场合、想解决的问题,而不是询问具体的款式、参数,你需要将用户表达的需求翻译为具体可检索的商品特征(版型、材质、设计元素、风格标签等),并据此筛选商品、组织推荐逻辑。
+2. 如何使用make_search_products_tool:
+ 1. 可以生成多个query进行搜索:在需要搜索商品的时候,可以将需求分解为 2-4 个搜索查询,每个 query 聚焦一个明确的商品子类或搜索角度。
+ 2. 可以根据搜索结果调整搜索策略:每次调用 search_products 后,工具会返回搜索结果的相关性的判断、以及搜索结果的topN的title,你需要决策是否要调整搜索策略,比如结果质量太差,可能需要调整搜索词、或者加大试探的query数量(不要超过3-5个)。结果太差的原因有可能是你生成的query不合理、请根据你看到的商品名称的构成组织搜索关键词。
+3. 在最终回复中使用 [SEARCH_REF:xxx] 内联引用搜索结果:
+ 1. 搜索工具会返回一个结果引用标识[SEARCH_REF:xxx],撰写最终答复的时候请直接引用 [SEARCH_REF:xxx] ,系统会自动在该位置渲染对应的商品卡片列表,无需复述搜索结果。
+ 2. 因为系统会自动将[SEARCH_REF:xxx]渲染为搜索结果,所以[SEARCH_REF:xxx]必须独占一行,且只在需要渲染该query完整的搜索结果时才进行引用,同一个结果不要重复引用。
+4. 今天是{datetime.now().strftime("%Y-%m-%d")},所有与当前时间(比如天气、最新或即将发生的事件)相关的问题,都要使用web_search工具。
+"""
+
+SYSTEM_PROMPT___2 = """ 角色定义
你是我们店铺的一名专业的电商导购,是一个善于倾听、主动引导、懂得搭配的“时尚顾问”,通过有温度的对话,给用户提供有价值的信息,包括需求引导、方案推荐、搜索结果推荐,最终促成满意的购物决策或转化行为。
作为我们店铺的一名专业的销售,除了本店铺的商品的推荐,你可以给用户提供有帮助的信息,但是不要虚构商品、提供本商店搜索结果以外的商品。
@@ -48,6 +73,7 @@ SYSTEM_PROMPT = """ 角色定义
1. 可以生成多个query进行搜索:在需要搜索商品的时候,可以将需求分解为 2-4 个搜索查询,每个 query 聚焦一个明确的商品子类或搜索角度。
2. 可以根据搜索结果调整搜索策略:每次调用 search_products 后,工具会返回搜索结果的相关性的判断、以及搜索结果的topN的title,你需要决策是否要调整搜索策略,比如结果质量太差,可能需要调整搜索词、或者加大试探的query数量(不要超过3-5个)。
3. 使用 [SEARCH_REF:xxx] 内联引用搜索结果:搜索工具会返回一个结果引用标识[SEARCH_REF:xxx],撰写最终答复的时候可以直接引用将 [SEARCH_REF:xxx] ,系统会自动在该位置渲染对应的商品卡片列表,无需复述搜索结果。
+ 4. 因为系统会自动将[SEARCH_REF:xxx]渲染为搜索结果,所以只在需要渲染该query完整的搜索结果时才进行引用,同一个结果不要重复引用。
"""
@@ -81,9 +107,40 @@ def _extract_message_text(msg) -> str:
return str(content) if content else ""
+# 部分 API(如 DeepSeek)在 content 中返回 think 标签块,需去掉后只保留正式回复
+_RE_THINK_TAGS = re.compile(r"<think>.*?</think>", re.DOTALL | re.IGNORECASE)
+
+
+def _extract_formal_reply(msg) -> str:
+ """
+ 只截取大模型回复中的「正式结果」,去掉 thinking/reasoning 内容。
+ - 若 content 为 list(如 Responses API):只取 type 为 output_text/text 的块,跳过 reasoning。
+ - 若 content 为 str:去掉 think 标签及其内容。
+ """
+ content = getattr(msg, "content", "")
+ if isinstance(content, list):
+ parts = []
+ for block in content:
+ if not isinstance(block, dict):
+ continue
+ block_type = (block.get("type") or "").lower()
+ if block_type in ("reasoning",):
+ continue
+ text = block.get("text") or block.get("content") or ""
+ if text:
+ parts.append(text)
+ return "".join(str(p) for p in parts).strip()
+ if isinstance(content, str):
+ return _RE_THINK_TAGS.sub("", content).strip()
+ return str(content).strip() if content else ""
+
+
def _message_for_log(msg: BaseMessage) -> dict:
"""Serialize a message for structured logging (content truncated)."""
- text = _extract_message_text(msg)
+ if getattr(msg, "additional_kwargs", None) and "reasoning" in (msg.additional_kwargs or {}):
+ text = _extract_formal_reply(msg) or _extract_message_text(msg)
+ else:
+ text = _extract_message_text(msg)
if len(text) > _LOG_CONTENT_MAX:
text = text[:_LOG_CONTENT_MAX] + f"... [truncated, total {len(text)} chars]"
out: dict[str, Any] = {
@@ -106,13 +163,17 @@ class ShoppingAgent:
def __init__(self, session_id: Optional[str] = None):
self.session_id = session_id or "default"
- llm_kwargs = dict(
+ llm_kwargs: dict[str, Any] = dict(
model=settings.openai_model,
temperature=settings.openai_temperature,
api_key=settings.openai_api_key,
)
if settings.openai_api_base_url:
llm_kwargs["base_url"] = settings.openai_api_base_url
+ if getattr(settings, "openai_use_reasoning", False):
+ llm_kwargs["use_responses_api"] = True
+ effort = getattr(settings, "openai_reasoning_effort", "medium") or "medium"
+ llm_kwargs["model_kwargs"] = {"reasoning": {"effort": effort, "summary": "none"}}
self.llm = ChatOpenAI(**llm_kwargs)
@@ -246,7 +307,7 @@ class ShoppingAgent:
final_state = self.graph.get_state(config)
final_msg = final_state.values["messages"][-1]
- response_text = _extract_message_text(final_msg)
+ response_text = _extract_formal_reply(final_msg) or _extract_message_text(final_msg)
# Collect new SearchResults added during this turn
registry_after = global_registry.get_all(self.session_id)
@@ -292,7 +353,8 @@ class ShoppingAgent:
if getattr(msg, "type", None) in ("system", "tool"):
continue
role = "user" if msg.type == "human" else "assistant"
- result.append({"role": role, "content": _extract_message_text(msg)})
+ content = _extract_formal_reply(msg) or _extract_message_text(msg) if role == "assistant" else _extract_message_text(msg)
+ result.append({"role": role, "content": content})
return result
except Exception as e:
logger.error(f"get_conversation_history error: {e}")
diff --git a/app/config.py b/app/config.py
index 3ce61b4..9940c8c 100644
--- a/app/config.py
+++ b/app/config.py
@@ -33,6 +33,9 @@ class Settings(BaseSettings):
openai_vision_model: str = "qwen3-omni-flash"
openai_temperature: float = 0.7
openai_max_tokens: int = 1000
+ # 对话调用大模型时是否开启 thinking(需兼容 Responses API / reasoning 的模型,如 o1/o3/o4-mini)
+ openai_use_reasoning: bool = False
+ openai_reasoning_effort: str = "medium" # low | medium | high
# Base URL for OpenAI-compatible APIs (e.g. Qwen/DashScope)
# Qwen 北京: https://dashscope.aliyuncs.com/compatible-mode/v1
openai_api_base_url: Optional[str] = None
diff --git a/app/search_registry.py b/app/search_registry.py
index 48ffc3d..6db29ff 100644
--- a/app/search_registry.py
+++ b/app/search_registry.py
@@ -27,7 +27,7 @@ class ProductItem:
vendor: Optional[str] = None
image_url: Optional[str] = None
relevance_score: Optional[float] = None
- # LLM-assigned label: "Highly Relevant" | "Partially Relevant" | "Not Relevant"
+ # LLM-assigned label: "Relevant" | "Partially Relevant" | "Irrelevant"
match_label: str = "Partially Relevant"
tags: list = field(default_factory=list)
specifications: list = field(default_factory=list)
@@ -40,7 +40,7 @@ class SearchResult:
Identified by ref_id (e.g. 'sr_3f9a1b2c').
Stores the query, LLM quality assessment, and the curated product list
- (only "Highly Relevant" and "Partially Relevant" items — "Not Relevant" are discarded).
+ (only "Relevant" and "Partially Relevant" items — "Irrelevant" are discarded).
"""
ref_id: str
diff --git a/app/tools/search_tools.py b/app/tools/search_tools.py
index eae9878..1db6eff 100644
--- a/app/tools/search_tools.py
+++ b/app/tools/search_tools.py
@@ -2,7 +2,7 @@
Search Tools for Product Discovery
- search_products is created via make_search_products_tool(session_id, registry).
-- After search API, an LLM labels each result as Highly Relevant / Partially Relevant / Not Relevant; we count and
+- After search API, an LLM labels each result as Relevant / Partially Relevant / Irrelevant; we count and
store the curated list in the registry, return [SEARCH_REF:ref_id] + quality counts + top10 titles.
"""
@@ -74,7 +74,7 @@ def _assess_search_quality(query: str, raw_products: list) -> tuple[list[str], s
product_text = "\n".join(lines)
prompt = f"""评估以下搜索结果与用户查询的匹配程度,完成两件事:
-1. 为每条结果打一个等级:Highly Relevant / Partially Relevant / Not Relevant。
+1. 为每条结果打一个等级:Relevant / Partially Relevant / Irrelevant。
2. 写一段 quality_summary(1–2 句话):简要说明搜索结果主要包含哪些商品、是否基本满足搜索意图、整体匹配度如何。
用户查询:{query}
@@ -82,10 +82,10 @@ def _assess_search_quality(query: str, raw_products: list) -> tuple[list[str], s
搜索结果(共 {n} 条):
{product_text}
-等级说明:Highly Relevant=完全符合查询意图;Partially Relevant=基本相关(如品类等主需求匹配但部分属性不完全符合);Not Relevant=不相关。
+等级说明:Relevant=完全符合查询意图;Partially Relevant=基本相关(如品类等主需求匹配但部分属性不完全符合);Irrelevant=不相关。
请严格按以下 JSON 输出,仅输出 JSON,无其他内容:
-{{"labels": ["Highly Relevant", "Partially Relevant", "Not Relevant", ...], "quality_summary": "你的1-2句总结"}}
+{{"labels": ["Relevant", "Partially Relevant", "Irrelevant", ...], "quality_summary": "你的1-2句总结"}}
labels 数组长度必须等于 {n}。"""
try:
@@ -93,7 +93,7 @@ labels 数组长度必须等于 {n}。"""
resp = client.chat.completions.create(
model=settings.openai_model,
messages=[{"role": "user", "content": prompt}],
- max_tokens=700,
+ max_tokens=1200,
temperature=0.1,
)
raw = resp.choices[0].message.content.strip()
@@ -104,7 +104,7 @@ labels 数组长度必须等于 {n}。"""
raw = raw.strip()
data = json.loads(raw)
labels = data.get("labels", [])
- valid = {"Highly Relevant", "Partially Relevant", "Not Relevant"}
+ valid = {"Relevant", "Partially Relevant", "Irrelevant"}
labels = [l if l in valid else "Partially Relevant" for l in labels]
while len(labels) < n:
labels.append("Partially Relevant")
@@ -133,14 +133,14 @@ def make_search_products_tool(
@tool
def search_products(query: str, limit: int = 20) -> str:
- """搜索商品库并做质量评估:LLM 为每条结果打等级(Highly Relevant / Partially Relevant / Not Relevant),返回引用与 top10 标题。
+ """搜索商品库并做质量评估:LLM 为每条结果打等级(Relevant / Partially Relevant / Irrelevant),返回引用与 top10 标题。
Args:
query: 自然语言商品描述
limit: 最多返回条数(1-20)
Returns:
- 【搜索完成】+ 结果引用 [SEARCH_REF:ref_id] + 质量情况(评估条数、Highly/Partially Relevant 数)+ results list(top10 标题)
+ 【搜索完成】+ 结果引用 [SEARCH_REF:ref_id] + 质量情况(评估条数、Relevant/Partially Relevant 数)+ results list(top10 标题)
"""
try:
logger.info(f"[{session_id}] search_products: query={query!r} limit={limit}")
@@ -176,13 +176,13 @@ def make_search_products_tool(
)
labels, quality_summary = _assess_search_quality(query, raw_results)
- perfect_count = sum(1 for l in labels if l == "Highly Relevant")
+ perfect_count = sum(1 for l in labels if l == "Relevant")
partial_count = sum(1 for l in labels if l == "Partially Relevant")
irrelevant_count = len(labels) - perfect_count - partial_count
products: list[ProductItem] = []
for raw, label in zip(raw_results, labels):
- if label not in ("Highly Relevant", "Partially Relevant"):
+ if label not in ("Relevant", "Partially Relevant"):
continue
products.append(
ProductItem(
@@ -229,7 +229,7 @@ def make_search_products_tool(
return (
f"【搜索完成】query='{query}'\n"
f"结果引用:[SEARCH_REF:{ref_id}]\n"
- f"搜索结果质量情况:评估总条数{assessed_n}条,Highly Relevant {perfect_count} 条,Partially Relevant {partial_count} 条。\n"
+ f"搜索结果质量情况:评估总条数{assessed_n}条,Relevant {perfect_count} 条,Partially Relevant {partial_count} 条。\n"
f"results list:\n{results_list}"
)
@@ -251,7 +251,7 @@ def web_search(query: str) -> str:
触发场景:
- 需要**外部知识**:流行趋势、品牌、搭配文化、节日习俗等
- - 需要**实时/及时信息**:当季流行元素、某地未来的天气
+ - 需要**实时/及时信息**:所有与天气相关的问题、当季流行元素、某地近期或者未来的事件、所有依赖当前时间相关的信息
- 需要**宏观参考**:不同场合/国家的穿着建议、选购攻略
Args:
@@ -369,7 +369,7 @@ def analyze_image_style(image_path: str) -> str:
],
}
],
- max_tokens=500,
+ max_tokens=800,
temperature=0.3,
)
--
libgit2 0.21.2