#!/usr/bin/env bash
# Search evaluation quick entrypoints. Run from any cwd; resolves repo root.
set -euo pipefail

# Repo root: this script lives two directory levels below it.
ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
cd "$ROOT"
# Project virtualenv interpreter — assumes .venv exists at repo root (TODO confirm).
PY="${ROOT}/.venv/bin/python"
# Tenant to evaluate against; override via TENANT_ID env var.
TENANT_ID="${TENANT_ID:-163}"
# Query list, path relative to repo root; override via REPO_EVAL_QUERIES.
QUERIES="${REPO_EVAL_QUERIES:-scripts/evaluation/queries/queries.txt}"

# Print the subcommand summary and relevant environment variables.
usage() {
  printf '%s\n' \
    "Usage: $0 batch|batch-rebuild|serve" \
    "  batch          — batch eval: live search every query, LLM only for missing labels (top_k=50, simple)" \
    "  batch-rebuild  — same as batch but --force-refresh-labels (re-LLM all top_k hits; expensive, overwrites cache)" \
    "  serve          — eval UI (default http://0.0.0.0:\${EVAL_WEB_PORT:-6010}/; also: ./scripts/start_eval_web.sh)" \
    "Env: TENANT_ID (default 163), REPO_EVAL_QUERIES, EVAL_WEB_HOST, EVAL_WEB_PORT (default 6010)"
}

# Dispatch on the first CLI argument.
case "${1:-}" in
  batch|batch-rebuild)
    # Shared batch-eval arguments; batch-rebuild additionally forces
    # re-labeling of every cached hit (expensive — overwrites label cache).
    args=(
      --tenant-id "$TENANT_ID"
      --queries-file "$QUERIES"
      --top-k 50
      --language en
      --labeler-mode simple
    )
    # NB: plain `if`, not `[[ … ]] && args+=…` — a false test at the end of an
    # && list would trip `set -e`.
    if [[ "$1" == "batch-rebuild" ]]; then
      args+=(--force-refresh-labels)
    fi
    exec "$PY" scripts/evaluation/build_annotation_set.py batch "${args[@]}"
    ;;
  serve)
    # Eval web UI; host/port configurable via env.
    EVAL_WEB_PORT="${EVAL_WEB_PORT:-6010}"
    EVAL_WEB_HOST="${EVAL_WEB_HOST:-0.0.0.0}"
    exec "$PY" scripts/evaluation/serve_eval_web.py serve \
      --tenant-id "$TENANT_ID" \
      --queries-file "$QUERIES" \
      --host "$EVAL_WEB_HOST" \
      --port "$EVAL_WEB_PORT"
    ;;
  -h|--help|help)
    # Explicit help request: print usage to stdout, exit 0.
    usage
    ;;
  *)
    # Unknown or missing subcommand: usage is a diagnostic here → stderr, exit 1.
    usage >&2
    exit 1
    ;;
esac