# start_eval.sh
#!/usr/bin/env bash
# Quick entrypoints for search evaluation. Safe to run from any cwd:
# the repository root is resolved relative to this script's own location.
set -euo pipefail

# Repo root lives two directories above this script.
script_dir="$(dirname "$0")"
ROOT="$(cd "${script_dir}/../.." && pwd)"
cd "$ROOT"
# Interpreter from the project virtualenv; tunables are overridable via env.
PY="$ROOT/.venv/bin/python"
TENANT_ID="${TENANT_ID:-163}"
QUERIES="${REPO_EVAL_QUERIES:-scripts/evaluation/queries/queries.txt}"

# Print CLI usage to stdout; the caller decides the exit status.
# Fix: the em dashes had been mangled into mojibake ("โ€”") by a bad
# encoding round-trip; restored to proper "—". printf is used instead of
# echo for portable, predictable output.
usage() {
  printf '%s\n' \
    "Usage: $0 batch|batch-rebuild|serve" \
    "  batch          — batch eval: live search every query, LLM only for missing labels (top_k=50)" \
    "  batch-rebuild  — deep rebuild: build --force-refresh-labels (search recall pool + full-corpus rerank + batched LLM; expensive)" \
    "  serve          — eval UI (default http://0.0.0.0:\${EVAL_WEB_PORT:-6010}/; also: ./scripts/start_eval_web.sh)" \
    "Env: TENANT_ID (default 163), REPO_EVAL_QUERIES, EVAL_WEB_HOST, EVAL_WEB_PORT (default 6010)"
}

# Dispatch on the first CLI argument; every arm replaces this shell with the
# target Python process via exec, so nothing after the case ever runs.
case "${1:-}" in
  batch)
    # Incremental batch eval: live search for every query; per the usage
    # text, the LLM labels only results that are missing labels (top_k=50).
    exec "$PY" scripts/evaluation/build_annotation_set.py batch \
      --tenant-id "$TENANT_ID" \
      --queries-file "$QUERIES" \
      --top-k 50 \
      --language en
    ;;
  batch-rebuild)
    # Deep rebuild: discards artifacts and forces re-rank + label refresh.
    # Flagged as expensive in the usage text (deep search/rerank pools,
    # batched LLM labeling over the full corpus).
    exec "$PY" scripts/evaluation/build_annotation_set.py build \
      --tenant-id "$TENANT_ID" \
      --queries-file "$QUERIES" \
      --search-depth 500 \
      --rerank-depth 10000 \
      --reset-artifacts \
      --force-refresh-rerank \
      --force-refresh-labels \
      --language en
    ;;
  serve)
    # Eval web UI; host/port come from env with defaults matching the usage
    # text (0.0.0.0:6010).
    EVAL_WEB_PORT="${EVAL_WEB_PORT:-6010}"
    EVAL_WEB_HOST="${EVAL_WEB_HOST:-0.0.0.0}"
    exec "$PY" scripts/evaluation/serve_eval_web.py serve \
      --tenant-id "$TENANT_ID" \
      --queries-file "$QUERIES" \
      --host "$EVAL_WEB_HOST" \
      --port "$EVAL_WEB_PORT"
    ;;
  *)
    # Unknown or missing subcommand: show help and fail.
    usage
    exit 1
    ;;
esac