@@ -12,9 +12,6 @@ cleanup() {
   if [[ "${CLEANUP_BENCHMARK_RESULTS:-1}" == "1" ]]; then
     rm -rf vllm/benchmarks/results
   fi
-
-  # https://github.com/vllm-project/vllm/issues/13392
-  rm -rf ~/.cache/vllm/torch_compile_cache
 }

 setup_vllm() {
@@ -43,8 +40,15 @@ build_vllm() {
   # TODO (huydhn) I'll setup remote cache for this later
   SCCACHE_CACHE_SIZE=100G sccache --start-server || true
   # Build and install vLLM
-  pip install -r requirements-build.txt
-  pip install --editable .
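+  # Install differently depending on which GPU stack the runner exposes: CUDA (nvidia-smi) or ROCm (amd-smi)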
+  if command -v nvidia-smi; then
+    pip install -r requirements/build.txt
+    pip install --editable .
+  elif command -v amd-smi; then
+    pip install -r requirements/rocm.txt
+    pip install -r requirements/rocm-build.txt
+    # https://docs.vllm.ai/en/latest/getting_started/installation/gpu/index.html?device=rocm
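+    # gfx90a (MI200-series) and gfx942 (MI300-series) are the ROCm targets built here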
+    PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py develop
+  fi
   popd
 }

@@ -65,19 +69,22 @@ run_benchmark() {
 upload_results() {
   if [[ "${UPLOAD_BENCHMARK_RESULTS:-1}" == "1" ]]; then
     # Upload the benchmark results
-    python upload_benchmark_results.py --vllm vllm --benchmark-results vllm/benchmarks/results
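+    # Pass the detected GPU device name (set in the main script below) to the uploader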
+    python upload_benchmark_results.py \
+      --vllm vllm \
+      --benchmark-results vllm/benchmarks/results \
+      --device "${GPU_DEVICE}"

     pushd vllm
     if [[ -f benchmarks/results/benchmark_results.md ]]; then
       # Upload the markdown file
-      S3_PATH="v3/vllm-project/vllm/${HEAD_BRANCH}/${HEAD_SHA}/benchmark_results.md"
+      S3_PATH="v3/vllm-project/vllm/${HEAD_BRANCH}/${HEAD_SHA}/${GPU_DEVICE}/benchmark_results.md"
       aws s3 cp --acl public-read \
         benchmarks/results/benchmark_results.md "s3://ossci-benchmarks/${S3_PATH}"
     fi

     if [[ -f benchmarks.log ]]; then
       # Upload the logs
-      S3_PATH="v3/vllm-project/vllm/${HEAD_BRANCH}/${HEAD_SHA}/benchmarks.log"
+      S3_PATH="v3/vllm-project/vllm/${HEAD_BRANCH}/${HEAD_SHA}/${GPU_DEVICE}/benchmarks.log"
       aws s3 cp --acl public-read \
         benchmarks.log "s3://ossci-benchmarks/${S3_PATH}"
     fi
@@ -99,7 +106,13 @@ pushd vllm
 export HEAD_BRANCH=main
 export HEAD_SHA=$(git rev-parse --verify HEAD)

-S3_PATH="v3/vllm-project/vllm/${HEAD_BRANCH}/${HEAD_SHA}/benchmark_results.json"
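+# Detect the GPU and keep the second field of its reported name so results land under a per-device S3 prefix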
+if command -v nvidia-smi; then
+  declare -g GPU_DEVICE=$(nvidia-smi -i 0 --query-gpu=name --format=csv,noheader | awk '{print $2}')
+elif command -v amd-smi; then
+  declare -g GPU_DEVICE=$(amd-smi static -g 0 -a | grep 'MARKET_NAME' | awk '{print $2}')
+fi
+
+S3_PATH="v3/vllm-project/vllm/${HEAD_BRANCH}/${HEAD_SHA}/${GPU_DEVICE}/benchmark_results.json"
 aws s3api head-object --bucket ossci-benchmarks --key ${S3_PATH} || NOT_EXIST=1

 if [[ ${NOT_EXIST:-0} == "0" && "${OVERWRITE_BENCHMARK_RESULTS:-0}" != "1" ]]; then