# Isolated dependencies for qwen3_transformers_packed reranker backend.
#
# Keep this stack aligned with the validated CUDA runtime on our hosts.
# On this machine, torch 2.11.0 + cu130 fails CUDA init, while torch 2.10.0 + cu128 works.
# We also cap transformers <5 to stay on the same family as the working vLLM score env.

-r requirements_reranker_qwen3_transformers.txt

torch==2.10.0
transformers>=4.51.0,<5