@@ -12,9 +12,6 @@ cleanup() {
   if [[ "${CLEANUP_BENCHMARK_RESULTS:-1}" == "1" ]]; then
     rm -rf vllm/benchmarks/results
   fi
-
-  # https://github.com/vllm-project/vllm/issues/13392
-  rm -rf ~/.cache/vllm/torch_compile_cache
 }
 
 setup_vllm () {
@@ -43,8 +40,15 @@ build_vllm() {
   # TODO (huydhn) I'll setup remote cache for this later
   SCCACHE_CACHE_SIZE=100G sccache --start-server || true
   # Build and install vLLM
-  pip install -r requirements-build.txt
-  pip install --editable .
+  if command -v nvidia-smi; then
+    pip install -r requirements/build.txt
+    pip install --editable .
+  elif command -v amd-smi; then
+    pip install -r requirements/rocm.txt
+    pip install -r requirements/rocm-build.txt
+    # https://docs.vllm.ai/en/latest/getting_started/installation/gpu/index.html?device=rocm
+    PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py develop
+  fi
   popd
 }
 
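The new branch picks the install path by probing for the vendor CLI: `command -v` exits non-zero when the tool is absent, so the CUDA requirements are used on NVIDIA hosts and the ROCm requirements on AMD hosts. A minimal sketch of the same idiom with the probe output silenced (illustrative only, not part of this change):

    if command -v nvidia-smi >/dev/null 2>&1; then
        echo "NVIDIA GPU detected, using CUDA build requirements"
    elif command -v amd-smi >/dev/null 2>&1; then
        echo "AMD GPU detected, using ROCm build requirements"
    else
        echo "No supported GPU tooling found" >&2
    fi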
@@ -65,19 +69,22 @@ run_benchmark() {
 upload_results () {
   if [[ "${UPLOAD_BENCHMARK_RESULTS:-1}" == "1" ]]; then
     # Upload the benchmark results
-    python upload_benchmark_results.py --vllm vllm --benchmark-results vllm/benchmarks/results
+    python upload_benchmark_results.py \
+      --vllm vllm \
+      --benchmark-results vllm/benchmarks/results \
+      --device "${GPU_DEVICE}"
 
     pushd vllm
     if [[ -f benchmarks/results/benchmark_results.md ]]; then
       # Upload the markdown file
-      S3_PATH="v3/vllm-project/vllm/${HEAD_BRANCH}/${HEAD_SHA}/benchmark_results.md"
+      S3_PATH="v3/vllm-project/vllm/${HEAD_BRANCH}/${HEAD_SHA}/${GPU_DEVICE}/benchmark_results.md"
       aws s3 cp --acl public-read \
         benchmarks/results/benchmark_results.md "s3://ossci-benchmarks/${S3_PATH}"
     fi
 
     if [[ -f benchmarks.log ]]; then
       # Upload the logs
-      S3_PATH="v3/vllm-project/vllm/${HEAD_BRANCH}/${HEAD_SHA}/benchmarks.log"
+      S3_PATH="v3/vllm-project/vllm/${HEAD_BRANCH}/${HEAD_SHA}/${GPU_DEVICE}/benchmarks.log"
       aws s3 cp --acl public-read \
         benchmarks.log "s3://ossci-benchmarks/${S3_PATH}"
     fi
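With `--device` forwarded and the device segment added to both S3 keys, results from different accelerators no longer overwrite each other under the same branch and commit. Assuming `GPU_DEVICE=H100` on the `main` branch (hypothetical SHA), the uploaded objects would land at keys like:

    s3://ossci-benchmarks/v3/vllm-project/vllm/main/<HEAD_SHA>/H100/benchmark_results.md
    s3://ossci-benchmarks/v3/vllm-project/vllm/main/<HEAD_SHA>/H100/benchmarks.log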
@@ -99,7 +106,13 @@ pushd vllm
 export HEAD_BRANCH=main
 export HEAD_SHA=$(git rev-parse --verify HEAD)
 
-S3_PATH="v3/vllm-project/vllm/${HEAD_BRANCH}/${HEAD_SHA}/benchmark_results.json"
+if command -v nvidia-smi; then
+  declare -g GPU_DEVICE=$(nvidia-smi -i 0 --query-gpu=name --format=csv,noheader | awk '{print $2}')
+elif command -v amd-smi; then
+  declare -g GPU_DEVICE=$(amd-smi static -g 0 -a | grep 'MARKET_NAME' | awk '{print $2}')
+fi
+
+S3_PATH="v3/vllm-project/vllm/${HEAD_BRANCH}/${HEAD_SHA}/${GPU_DEVICE}/benchmark_results.json"
 aws s3api head-object --bucket ossci-benchmarks --key ${S3_PATH} || NOT_EXIST=1
 
 if [[ ${NOT_EXIST:-0} == "0" && "${OVERWRITE_BENCHMARK_RESULTS:-0}" != "1" ]]; then
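For reference, `GPU_DEVICE` ends up as the second whitespace-separated token of the vendor tool's name query. On an NVIDIA host the query returns the full product name and awk keeps the model token; for example (illustrative output, assuming an H100 machine):

    $ nvidia-smi -i 0 --query-gpu=name --format=csv,noheader
    NVIDIA H100 80GB HBM3
    $ nvidia-smi -i 0 --query-gpu=name --format=csv,noheader | awk '{print $2}'
    H100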