# constants.py
"""Paths and shared constants for search evaluation."""

from pathlib import Path

_PKG_DIR = Path(__file__).resolve().parent
_SCRIPTS_EVAL_DIR = _PKG_DIR.parent
PROJECT_ROOT = _SCRIPTS_EVAL_DIR.parents[1]

# Canonical English labels (must match LLM prompt output in prompts._CLASSIFY_TEMPLATE_EN)
RELEVANCE_LV3 = "Fully Relevant"
RELEVANCE_LV2 = "Mostly Relevant"
RELEVANCE_LV1 = "Weakly Relevant"
RELEVANCE_LV0 = "Irrelevant"

# Graded relevance for ranking evaluation: grade 3/2/1/0, strongest first.
RELEVANCE_GRADE_MAP = {
    RELEVANCE_LV3: 3,
    RELEVANCE_LV2: 2,
    RELEVANCE_LV1: 1,
    RELEVANCE_LV0: 0,
}

# Every label the pipeline accepts is exactly a key of the grade map.
VALID_LABELS = frozenset(RELEVANCE_GRADE_MAP)

# Label sets for binary diagnostic slices layered on top of graded ranking metrics.
RELEVANCE_NON_IRRELEVANT = VALID_LABELS - frozenset({RELEVANCE_LV0})
RELEVANCE_STRONG = frozenset({RELEVANCE_LV3, RELEVANCE_LV2})

# NDCG-style gain per label. The textbook choice is gain = 2**rel - 1, but the
# annotation quality here is not precise enough to justify that spread, so we
# deliberately flatten the gap between the top grades by using linear gain
# (gain == grade).
RELEVANCE_GAIN_MAP = dict(RELEVANCE_GRADE_MAP)

# P(stop | relevance) for ERR (Expected Reciprocal Rank); cascade model (Chapelle et al., 2009).
STOP_PROB_MAP = {
    RELEVANCE_LV3: 0.99,
    RELEVANCE_LV2: 0.8,
    RELEVANCE_LV1: 0.1,
    RELEVANCE_LV0: 0.0,
}

# Default output/input locations for evaluation runs.
DEFAULT_ARTIFACT_ROOT = PROJECT_ROOT.joinpath("artifacts", "search_evaluation")
DEFAULT_QUERY_FILE = _SCRIPTS_EVAL_DIR.joinpath("queries", "queries.txt")

# Logging (``build_annotation_set.py`` / ``serve_eval_web.py`` → ``eval_framework.cli.main``)
EVAL_LOG_DIR = PROJECT_ROOT.joinpath("logs")
EVAL_VERBOSE_LOG_DIR = EVAL_LOG_DIR.joinpath("verbose")
EVAL_LOG_FILE = EVAL_LOG_DIR.joinpath("eval.log")
EVAL_VERBOSE_LOG_FILE = EVAL_VERBOSE_LOG_DIR.joinpath("eval_verbose.log")

# Judge LLM (eval_framework only; override via CLI --judge-model / constructor kwargs)
DEFAULT_JUDGE_MODEL = "qwen3.5-plus"
DEFAULT_JUDGE_ENABLE_THINKING = False
DEFAULT_JUDGE_DASHSCOPE_BATCH = False
# DashScope batch-mode settings for the judge.
DEFAULT_JUDGE_BATCH_COMPLETION_WINDOW = "24h"
DEFAULT_JUDGE_BATCH_POLL_INTERVAL_SEC = 10.0

# Query-intent LLM (separate from judge; used once per query, injected into relevance prompts)
DEFAULT_INTENT_MODEL = "qwen3-max"
DEFAULT_INTENT_ENABLE_THINKING = True

# --- Rebuild annotation pool (``build --force-refresh-labels``) ---
# Flow: search recall pool (rerank_score=1, no rerank API) + rerank rest of corpus +
# LLM labels in fixed-size batches along global order (see ``framework._annotate_rebuild_batches``).
DEFAULT_SEARCH_RECALL_TOP_K = 200
DEFAULT_RERANK_HIGH_THRESHOLD = 0.5
DEFAULT_RERANK_HIGH_SKIP_COUNT = 1000
DEFAULT_REBUILD_LLM_BATCH_SIZE = 50
# At least this many LLM batches run before early-stop is considered.
DEFAULT_REBUILD_MIN_LLM_BATCHES = 10
# Hard cap on LLM batches per query (each batch labels up to ``DEFAULT_REBUILD_LLM_BATCH_SIZE`` docs).
DEFAULT_REBUILD_MAX_LLM_BATCHES = 40

# LLM early-stop (only after ``DEFAULT_REBUILD_MIN_LLM_BATCHES`` completed):
# A batch is "bad" when **both** hold (strict inequalities; see ``framework._annotate_rebuild_batches``):
#   - irrelevant_ratio > DEFAULT_REBUILD_IRRELEVANT_STOP_RATIO  (default 79.9%),
#   - (Irrelevant + Weakly Relevant) / n > DEFAULT_REBUILD_IRREL_LOW_COMBINED_STOP_RATIO  (default 95.9%).
# ``irrelevant_ratio`` = Irrelevant count / n; weak relevance is ``RELEVANCE_LV1`` ("Weakly Relevant").
# Increment streak on consecutive bad batches; reset on any non-bad batch. Stop when streak
# reaches ``DEFAULT_REBUILD_IRRELEVANT_STOP_STREAK`` (default 3).
DEFAULT_REBUILD_IRRELEVANT_STOP_RATIO = 0.799
DEFAULT_REBUILD_IRREL_LOW_COMBINED_STOP_RATIO = 0.959
DEFAULT_REBUILD_IRRELEVANT_STOP_STREAK = 3