We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent ab228ec · commit b66d76e (Copy full SHA for b66d76e)
vllm-benchmarks/run.sh
@@ -41,11 +41,11 @@ build_vllm() {
41
SCCACHE_CACHE_SIZE=100G sccache --start-server || true
42
# Build and install vLLM
43
if command -v nvidia-smi; then
44
- pip install -r requirements-build.txt
+ pip install -r requirements/build.txt
45
pip install --editable .
46
elif command -v amd-smi; then
47
- pip install -r requirements-rocm.txt
48
- pip install -r requirements-rocm-build.txt
+ pip install -r requirements/rocm.txt
+ pip install -r requirements/rocm-build.txt
49
# https://docs.vllm.ai/en/latest/getting_started/installation/gpu/index.html?device=rocm
50
PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py develop
51
fi
0 commit comments