#!/bin/bash
#
# Benchmark reranker for e-commerce short-text workload:
# - query <= ~100 tokens
# - docs are short title / title+brief
# - one request contains ~1000 docs
#
# Outputs JSON reports under perf_reports/<date>/reranker_1000docs/
#
# Usage:
#   ./scripts/benchmark_reranker_1000docs.sh
# Optional env:
#   BATCH_SIZES="24 32 48 64"
#   C1_REQUESTS=4
#   C4_REQUESTS=8
#   TENANT_ID=162
#
set -euo pipefail

# Resolve the repository root relative to this script so it works no matter
# what directory the caller launches it from.
PROJECT_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
cd "${PROJECT_ROOT}"

# Tunables — all overridable via environment (see header usage notes).
TENANT_ID="${TENANT_ID:-162}"                       # tenant passed to perf_api_benchmark.py
BATCH_SIZES="${BATCH_SIZES:-24 32 48 64}"           # space-separated infer batch sizes to sweep
C1_REQUESTS="${C1_REQUESTS:-4}"                     # request count at concurrency 1
C4_REQUESTS="${C4_REQUESTS:-8}"                     # request count at concurrency 4
TIMEOUT_SEC="${TIMEOUT_SEC:-240}"                   # per-request timeout for the benchmark client
RERANK_BASE="${RERANK_BASE:-http://127.0.0.1:6007}" # reranker service base URL

# Reports land under perf_reports/<YYYYMMDD>/reranker_1000docs/.
DATE_TAG="$(date +%Y%m%d)"
OUT_DIR="perf_reports/${DATE_TAG}/reranker_1000docs"
TMP_CASES="/tmp/rerank_1000_shortdocs_cases.json"
mkdir -p "${OUT_DIR}"

# Stop the reranker service on any exit path (normal completion, error under
# set -e, or interrupt). Best-effort: failures to stop are deliberately
# swallowed so cleanup never masks the script's real exit status.
# Also invoked between batch-size sweeps in the main loop below.
cleanup() {
  ./scripts/service_ctl.sh stop reranker >/dev/null 2>&1 || true
}
trap cleanup EXIT

# Write the base benchmark case file: a single POST /rerank scenario with a
# representative short e-commerce query. The "docs" array starts empty and is
# filled in by the Python generation step that follows (JSON cannot carry
# comments, so the structure is documented here instead).
cat > "${TMP_CASES}" <<'JSON'
{
  "scenarios": {
    "rerank": [
      {
        "method": "POST",
        "path": "/rerank",
        "json": {
          "query": "wireless ergonomic gaming mouse for office use with rechargeable battery and bluetooth",
          "docs": [],
          "normalize": true
        }
      }
    ]
  }
}
JSON

# Populate the "docs" array with 1000 synthetic short product titles.
# The target path is passed as argv[1] so it cannot silently drift from
# TMP_CASES (the previous version hard-coded the same /tmp path twice).
python3 - "${TMP_CASES}" <<'PY'
import json
import sys
from pathlib import Path

p = Path(sys.argv[1])
d = json.loads(p.read_text(encoding="utf-8"))

docs = []
for i in range(1000):
    # Rotate through three product templates for lexical variety.
    if i % 3 == 0:
        doc = f"wireless mouse model {i} ergonomic grip 2.4g bluetooth"
    elif i % 3 == 1:
        doc = f"gaming mouse {i} rgb lightweight high precision sensor"
    else:
        doc = f"office mouse {i} rechargeable silent click compact"
    # Sprinkle extra attributes so document lengths are not uniform.
    if i % 5 == 0:
        doc += " with usb receiver"
    if i % 7 == 0:
        doc += " long battery life"
    docs.append(doc)

d["scenarios"]["rerank"][0]["json"]["docs"] = docs
p.write_text(json.dumps(d, ensure_ascii=False), encoding="utf-8")
print(f"[info] generated docs={len(docs)} at {p}")
PY

#######################################
# Run one benchmark configuration and print a one-line summary.
# Globals:   OUT_DIR, TENANT_ID, RERANK_BASE, TMP_CASES, TIMEOUT_SEC (read)
# Arguments: $1 - infer batch size (used only to name the report file)
#            $2 - client concurrency
#            $3 - number of requests
# Outputs:   JSON report under OUT_DIR; "[result] ..." summary on stdout
# Returns:   non-zero if the benchmark client or the report parse fails
#######################################
run_bench() {
  local bs="$1"
  local c="$2"
  local req="$3"
  local out="${OUT_DIR}/rerank_bs${bs}_c${c}_r${req}.json"
  .venv/bin/python scripts/perf_api_benchmark.py \
    --scenario rerank \
    --tenant-id "${TENANT_ID}" \
    --reranker-base "${RERANK_BASE}" \
    --cases-file "${TMP_CASES}" \
    --concurrency "${c}" \
    --max-requests "${req}" \
    --timeout "${TIMEOUT_SEC}" \
    --output "${out}" >/dev/null
  # Summarize via argv rather than interpolating shell values into the Python
  # source (an unquoted heredoc): a path containing quotes or other
  # shell-special characters could otherwise corrupt the generated code.
  python3 - "${out}" "${bs}" "${c}" "${req}" <<'PY'
import json
import sys

path, bs, c, req = sys.argv[1:5]
with open(path, encoding="utf-8") as fh:
    report = json.load(fh)
r = report["results"][0]
lat = r["latency_ms"]
print(f"[result] bs={bs} c={c} req={req} avg={lat['avg']}ms p95={lat['p95']}ms rps={r['throughput_rps']}")
PY
}

# Sweep each batch size: restart the reranker with the new setting, wait for
# it to report healthy, then benchmark at concurrency 1 and 4.
for bs in ${BATCH_SIZES}; do  # intentional word-splitting over the size list
  echo "[info] benchmarking infer_batch_size=${bs}"
  cleanup

  # Launch detached so the service survives this shell's job control; the
  # batch-size and length-sort knobs are passed via the environment.
  RERANK_VLLM_INFER_BATCH_SIZE="${bs}" \
  RERANK_VLLM_SORT_BY_DOC_LENGTH="true" \
  nohup ./scripts/start_reranker.sh >"${OUT_DIR}/start_bs${bs}.log" 2>&1 &

  # Poll the health endpoint for up to 180 seconds.
  healthy=0
  for ((attempt = 1; attempt <= 180; attempt++)); do
    if curl -sf "${RERANK_BASE}/health" >/dev/null 2>&1; then
      healthy=1
      break
    fi
    sleep 1
  done
  if (( ! healthy )); then
    echo "[error] reranker startup timeout for bs=${bs}" >&2
    tail -n 80 "${OUT_DIR}/start_bs${bs}.log" >&2 || true
    exit 1
  fi

  run_bench "${bs}" 1 "${C1_REQUESTS}"
  run_bench "${bs}" 4 "${C4_REQUESTS}"
done

echo "[info] benchmark done: ${OUT_DIR}"