#!/usr/bin/env python3
"""Benchmark local translation models with products_analyzed.csv."""

from __future__ import annotations

import argparse
import copy
import csv
import json
import math
import platform
import resource
import statistics
import subprocess
import sys
import time
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterable, List

import torch
import transformers

PROJECT_ROOT = Path(__file__).resolve().parent.parent
if str(PROJECT_ROOT) not in sys.path:
    sys.path.insert(0, str(PROJECT_ROOT))

from config.services_config import get_translation_config  # noqa: E402
from translation.service import TranslationService  # noqa: E402
from translation.settings import get_translation_capability  # noqa: E402


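# Scenario matrix for the full run: each entry names a model capability from the
# translation config, the direction, and the CSV column that supplies source text.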
SCENARIOS: List[Dict[str, str]] = [
    {
        "name": "nllb-200-distilled-600m zh->en",
        "model": "nllb-200-distilled-600m",
        "source_lang": "zh",
        "target_lang": "en",
        "column": "title_cn",
        "scene": "sku_name",
    },
    {
        "name": "nllb-200-distilled-600m en->zh",
        "model": "nllb-200-distilled-600m",
        "source_lang": "en",
        "target_lang": "zh",
        "column": "title",
        "scene": "sku_name",
    },
    {
        "name": "opus-mt-zh-en zh->en",
        "model": "opus-mt-zh-en",
        "source_lang": "zh",
        "target_lang": "en",
        "column": "title_cn",
        "scene": "sku_name",
    },
    {
        "name": "opus-mt-en-zh en->zh",
        "model": "opus-mt-en-zh",
        "source_lang": "en",
        "target_lang": "zh",
        "column": "title",
        "scene": "sku_name",
    },
]


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Benchmark local translation models")
    parser.add_argument("--csv-path", default="products_analyzed.csv", help="Benchmark dataset CSV path")
    parser.add_argument("--limit", type=int, default=0, help="Limit rows for faster experiments; 0 means all")
    parser.add_argument("--output-dir", default="", help="Directory for JSON/Markdown reports")
    parser.add_argument("--single", action="store_true", help="Run a single scenario in-process")
    parser.add_argument("--model", default="", help="Model name for --single mode")
    parser.add_argument("--source-lang", default="", help="Source language for --single mode")
    parser.add_argument("--target-lang", default="", help="Target language for --single mode")
    parser.add_argument("--column", default="", help="CSV column to benchmark for --single mode")
    parser.add_argument("--scene", default="sku_name", help="Scene passed to translation service")
    parser.add_argument("--batch-size", type=int, default=0, help="Override configured batch size")
    parser.add_argument("--device-override", default="", help="Override configured device, for example cpu or cuda")
    parser.add_argument("--torch-dtype-override", default="", help="Override configured torch dtype, for example float32 or float16")
    parser.add_argument("--warmup-batches", type=int, default=1, help="Warmup batches before measuring")
    return parser.parse_args()


def load_texts(csv_path: Path, column: str, limit: int) -> List[str]:
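    """Collect non-empty values from `column`, stopping after `limit` rows (0 = no cap)."""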
    texts: List[str] = []
    with csv_path.open("r", encoding="utf-8") as handle:
        reader = csv.DictReader(handle)
        for row in reader:
            value = (row.get(column) or "").strip()
            if value:
                texts.append(value)
            if limit > 0 and len(texts) >= limit:
                break
    if not texts:
        raise ValueError(f"No non-empty texts found in column '{column}' from {csv_path}")
    return texts


def batched(values: List[str], batch_size: int) -> Iterable[List[str]]:
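    """Yield consecutive chunks of at most `batch_size` items."""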
    for start in range(0, len(values), batch_size):
        yield values[start:start + batch_size]


def percentile(values: List[float], p: float) -> float:
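    """Percentile via linear interpolation between closest ranks (numpy's default method)."""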
    if not values:
        return 0.0
    ordered = sorted(values)
    if len(values) == 1:
        return float(ordered[0])
    idx = (len(ordered) - 1) * p
    lower = math.floor(idx)
    upper = math.ceil(idx)
    if lower == upper:
        return float(ordered[lower])
    return float(ordered[lower] + (ordered[upper] - ordered[lower]) * (idx - lower))


def resolve_output_dir(output_dir: str) -> Path:
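    """Create (if needed) and return the report directory; defaults to perf_reports/<YYYYMMDD>/translation_local_models."""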
    if output_dir:
        path = Path(output_dir)
    else:
        path = PROJECT_ROOT / "perf_reports" / datetime.now().strftime("%Y%m%d") / "translation_local_models"
    path.mkdir(parents=True, exist_ok=True)
    return path


def build_environment_info() -> Dict[str, Any]:
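    """Snapshot interpreter, library, and GPU details for the report header."""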
    gpu_name = None
    gpu_total_mem_gb = None
    if torch.cuda.is_available():
        gpu_name = torch.cuda.get_device_name(0)
        props = torch.cuda.get_device_properties(0)
        gpu_total_mem_gb = round(props.total_memory / (1024 ** 3), 2)
    return {
        "python": platform.python_version(),
        "torch": torch.__version__,
        "transformers": transformers.__version__,
        "cuda_available": torch.cuda.is_available(),
        "gpu_name": gpu_name,
        "gpu_total_mem_gb": gpu_total_mem_gb,
        "platform": platform.platform(),
    }


def benchmark_single_scenario(args: argparse.Namespace) -> Dict[str, Any]:
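    """Run one model/direction in-process and return its metrics payload."""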
    csv_arg = Path(args.csv_path)
    csv_path = csv_arg if csv_arg.is_absolute() else (PROJECT_ROOT / csv_arg).resolve()
    config = copy.deepcopy(get_translation_config())
    capability = get_translation_capability(config, args.model, require_enabled=False)
    if args.device_override:
        capability["device"] = args.device_override
    if args.torch_dtype_override:
        capability["torch_dtype"] = args.torch_dtype_override
    if args.batch_size:
        capability["batch_size"] = args.batch_size
    config["capabilities"][args.model] = capability
    configured_batch_size = int(capability.get("batch_size") or 1)
    batch_size = configured_batch_size
    texts = load_texts(csv_path, args.column, args.limit)

    service = TranslationService(config)
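    # Reset CUDA counters so the peak-memory numbers reflect only this scenario.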
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()

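    # Time the backend load separately from translation throughput.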
    load_start = time.perf_counter()
    backend = service.get_backend(args.model)
    load_seconds = time.perf_counter() - load_start

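    # Warm-up batches prime kernels and caches; the measured loop below then
    # re-translates the full dataset, so throughput covers every row.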
    warmup_batches = min(max(args.warmup_batches, 0), max(1, math.ceil(len(texts) / batch_size)))
    for batch in list(batched(texts, batch_size))[:warmup_batches]:
        service.translate(
            text=batch,
            source_lang=args.source_lang,
            target_lang=args.target_lang,
            model=args.model,
            scene=args.scene,
        )

    batch_latencies_ms: List[float] = []
    success_count = 0
    failure_count = 0
    output_chars = 0
    total_input_chars = sum(len(text) for text in texts)
    measured_batches = list(batched(texts, batch_size))

    start = time.perf_counter()
    for batch in measured_batches:
        batch_start = time.perf_counter()
        outputs = service.translate(
            text=batch,
            source_lang=args.source_lang,
            target_lang=args.target_lang,
            model=args.model,
            scene=args.scene,
        )
        elapsed_ms = (time.perf_counter() - batch_start) * 1000
        batch_latencies_ms.append(elapsed_ms)

        if not isinstance(outputs, list):
            raise RuntimeError(f"Expected list output for batch translation, got {type(outputs)!r}")
        for item in outputs:
            if item is None:
                failure_count += 1
            else:
                success_count += 1
                output_chars += len(item)
    translate_seconds = time.perf_counter() - start

    peak_gpu_mem_gb = None
    peak_gpu_reserved_gb = None
    if torch.cuda.is_available():
        peak_gpu_mem_gb = round(torch.cuda.max_memory_allocated() / (1024 ** 3), 3)
        peak_gpu_reserved_gb = round(torch.cuda.max_memory_reserved() / (1024 ** 3), 3)

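    # ru_maxrss is reported in KiB on Linux (bytes on macOS), so this MB figure
    # assumes a Linux host; the `resource` module is Unix-only either way.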
    max_rss_mb = round(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024, 2)
    total_items = len(texts)

    return {
        "scenario": {
            "name": f"{args.model} {args.source_lang}->{args.target_lang}",
            "model": args.model,
            "source_lang": args.source_lang,
            "target_lang": args.target_lang,
            "column": args.column,
            "scene": args.scene,
        },
        "dataset": {
            "csv_path": str(csv_path),
            "rows": total_items,
            "input_chars": total_input_chars,
        },
        "runtime": {
            "device": str(getattr(backend, "device", capability.get("device", "unknown"))),
            "torch_dtype": str(getattr(backend, "torch_dtype", capability.get("torch_dtype", "unknown"))),
            "configured_batch_size": configured_batch_size,
            "used_batch_size": batch_size,
            "warmup_batches": warmup_batches,
            "load_seconds": round(load_seconds, 4),
            "translate_seconds": round(translate_seconds, 4),
            "total_seconds": round(load_seconds + translate_seconds, 4),
            "batch_count": len(batch_latencies_ms),
            "first_batch_ms": round(batch_latencies_ms[0], 2),
            "batch_latency_p50_ms": round(percentile(batch_latencies_ms, 0.50), 2),
            "batch_latency_p95_ms": round(percentile(batch_latencies_ms, 0.95), 2),
            "batch_latency_max_ms": round(max(batch_latencies_ms), 2),
            "avg_batch_latency_ms": round(statistics.fmean(batch_latencies_ms), 2),
            "avg_item_latency_ms": round((translate_seconds / total_items) * 1000, 3),
            "items_per_second": round(total_items / translate_seconds, 2),
            "input_chars_per_second": round(total_input_chars / translate_seconds, 2),
            "output_chars_per_second": round(output_chars / translate_seconds, 2),
            "success_count": success_count,
            "failure_count": failure_count,
            "success_rate": round(success_count / total_items, 6),
            "max_rss_mb": max_rss_mb,
            "peak_gpu_memory_gb": peak_gpu_mem_gb,
            "peak_gpu_reserved_gb": peak_gpu_reserved_gb,
        },
    }


def run_all_scenarios(args: argparse.Namespace) -> Dict[str, Any]:
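    """Run each scenario in a fresh subprocess so model weights, CUDA context,
    and peak-memory measurements cannot leak between models."""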
    report = {
        "generated_at": datetime.now().isoformat(timespec="seconds"),
        "environment": build_environment_info(),
        "scenarios": [],
    }

    for scenario in SCENARIOS:
        cmd = [
            sys.executable,
            str(Path(__file__).resolve()),
            "--single",
            "--csv-path",
            args.csv_path,
            "--model",
            scenario["model"],
            "--source-lang",
            scenario["source_lang"],
            "--target-lang",
            scenario["target_lang"],
            "--column",
            scenario["column"],
            "--scene",
            scenario["scene"],
            "--warmup-batches",
            str(args.warmup_batches),
        ]
        if args.limit:
            cmd.extend(["--limit", str(args.limit)])
        if args.batch_size:
            cmd.extend(["--batch-size", str(args.batch_size)])
        if args.device_override:
            cmd.extend(["--device-override", args.device_override])
        if args.torch_dtype_override:
            cmd.extend(["--torch-dtype-override", args.torch_dtype_override])

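        # The child prints a single "JSON_RESULT=..." line; scan stdout from the
        # end so any stray logging above it does not confuse the parser.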
        completed = subprocess.run(cmd, capture_output=True, text=True)
        if completed.returncode != 0:
            raise RuntimeError(
                f"Scenario '{scenario['name']}' failed with exit code "
                f"{completed.returncode}:\n{completed.stderr}"
            )
        result_line = ""
        for line in reversed(completed.stdout.splitlines()):
            if line.startswith("JSON_RESULT="):
                result_line = line
                break
        if not result_line:
            raise RuntimeError(f"Scenario output missing JSON_RESULT marker:\n{completed.stdout}\n{completed.stderr}")
        payload = json.loads(result_line.split("=", 1)[1])
        payload["scenario"]["name"] = scenario["name"]
        report["scenarios"].append(payload)

    return report


def render_markdown_report(report: Dict[str, Any]) -> str:
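    """Render the summary table plus one detail section per scenario."""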
    lines = [
        "# Local Translation Model Benchmark",
        "",
        f"- Generated at: `{report['generated_at']}`",
        f"- Python: `{report['environment']['python']}`",
        f"- Torch: `{report['environment']['torch']}`",
        f"- Transformers: `{report['environment']['transformers']}`",
        f"- CUDA: `{report['environment']['cuda_available']}`",
    ]
    if report["environment"]["gpu_name"]:
        lines.append(f"- GPU: `{report['environment']['gpu_name']}` ({report['environment']['gpu_total_mem_gb']} GiB)")
    lines.extend(
        [
            "",
            "| Scenario | Items/s | Avg item ms | Batch p50 ms | Batch p95 ms | Load s | Peak GPU GiB | Success |",
            "|---|---:|---:|---:|---:|---:|---:|---:|",
        ]
    )
    for item in report["scenarios"]:
        runtime = item["runtime"]
        lines.append(
            "| {name} | {items_per_second} | {avg_item_latency_ms} | {batch_latency_p50_ms} | {batch_latency_p95_ms} | {load_seconds} | {peak_gpu_memory_gb} | {success_rate} |".format(
                name=item["scenario"]["name"],
                items_per_second=runtime["items_per_second"],
                avg_item_latency_ms=runtime["avg_item_latency_ms"],
                batch_latency_p50_ms=runtime["batch_latency_p50_ms"],
                batch_latency_p95_ms=runtime["batch_latency_p95_ms"],
                load_seconds=runtime["load_seconds"],
                peak_gpu_memory_gb=runtime["peak_gpu_memory_gb"],
                success_rate=runtime["success_rate"],
            )
        )

    lines.append("")
    for item in report["scenarios"]:
        runtime = item["runtime"]
        dataset = item["dataset"]
        lines.extend(
            [
                f"## {item['scenario']['name']}",
                "",
                f"- Dataset rows: `{dataset['rows']}` from column `{item['scenario']['column']}`",
                f"- Direction: `{item['scenario']['source_lang']} -> {item['scenario']['target_lang']}`",
                f"- Batch size: configured `{runtime['configured_batch_size']}`, used `{runtime['used_batch_size']}`",
                f"- Load time: `{runtime['load_seconds']} s`",
                f"- Translate time: `{runtime['translate_seconds']} s`",
                f"- Throughput: `{runtime['items_per_second']} items/s`, `{runtime['input_chars_per_second']} input chars/s`",
                f"- Latency: avg item `{runtime['avg_item_latency_ms']} ms`, batch p50 `{runtime['batch_latency_p50_ms']} ms`, batch p95 `{runtime['batch_latency_p95_ms']} ms`, batch max `{runtime['batch_latency_max_ms']} ms`",
                f"- Memory: max RSS `{runtime['max_rss_mb']} MB`, peak GPU allocated `{runtime['peak_gpu_memory_gb']} GiB`, peak GPU reserved `{runtime['peak_gpu_reserved_gb']} GiB`",
                f"- Success: `{runtime['success_count']}/{dataset['rows']}`",
                "",
            ]
        )
    return "\n".join(lines)


def main() -> None:
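    """Entry point: one in-process scenario with --single, else the full suite plus reports."""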
    args = parse_args()
    if args.single:
        result = benchmark_single_scenario(args)
        print("JSON_RESULT=" + json.dumps(result, ensure_ascii=False))
        return

    report = run_all_scenarios(args)
    output_dir = resolve_output_dir(args.output_dir)
    timestamp = datetime.now().strftime("%H%M%S")
    json_path = output_dir / f"translation_local_models_{timestamp}.json"
    md_path = output_dir / f"translation_local_models_{timestamp}.md"
    json_path.write_text(json.dumps(report, ensure_ascii=False, indent=2), encoding="utf-8")
    md_path.write_text(render_markdown_report(report), encoding="utf-8")

    print(f"JSON report: {json_path}")
    print(f"Markdown report: {md_path}")
    for item in report["scenarios"]:
        runtime = item["runtime"]
        print(
            f"{item['scenario']['name']}: "
            f"{runtime['items_per_second']} items/s | "
            f"avg_item={runtime['avg_item_latency_ms']} ms | "
            f"p95_batch={runtime['batch_latency_p95_ms']} ms | "
            f"load={runtime['load_seconds']} s"
        )


if __name__ == "__main__":
    main()