
Commit de4f8b9

eqy authored and pytorchmergebot committed
[BE]: Update cudnn to 9.1.0.70 (pytorch#123475)
cuDNN has managed to upload cu11 and cu12 wheels for ~~9.0.0.312~~ 9.1.0.70, so trying this out... CC @Skylion007 @malfet

Co-authored-by: Wei Wang <[email protected]>
Co-authored-by: atalman <[email protected]>
Pull Request resolved: pytorch#123475
Approved by: https://github.com/Skylion007, https://github.com/malfet, https://github.com/nWEIdia, https://github.com/atalman
1 parent fba21ed · commit de4f8b9

Showing 31 changed files with 142 additions and 147 deletions.
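A build that actually picks up these wheels or images can be sanity-checked from Python; the one-liner below is an illustrative check (not part of this commit) and assumes a CUDA-enabled PyTorch install.

# Illustrative check: a build that bundles the new wheels should report a
# cuDNN 9.1.0 build (the version is reported as the integer 90100).
python -c "import torch; print(torch.backends.cudnn.version())"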

.ci/docker/build.sh

+25 -25

@@ -91,9 +91,9 @@ _UCC_COMMIT=20eae37090a4ce1b32bcce6144ccad0b49943e0b
 # configuration, so we hardcode everything here rather than do it
 # from scratch
 case "$image" in
-  pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -105,9 +105,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9)
     CUDA_VERSION=12.1.1
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -119,9 +119,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -134,9 +134,9 @@ case "$image" in
     TRITON=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks)
+  pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks)
     CUDA_VERSION=12.1.1
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -149,9 +149,9 @@ case "$image" in
     TRITON=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-focal-cuda12.1-cudnn8-py3.12-gcc9-inductor-benchmarks)
+  pytorch-linux-focal-cuda12.1-cudnn9-py3.12-gcc9-inductor-benchmarks)
     CUDA_VERSION=12.1.1
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.12
     GCC_VERSION=9
     PROTOBUF=yes
@@ -164,9 +164,9 @@ case "$image" in
     TRITON=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-focal-cuda12.4-cudnn8-py3.12-gcc9-inductor-benchmarks)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.12
     GCC_VERSION=9
     PROTOBUF=yes
@@ -179,9 +179,9 @@ case "$image" in
     TRITON=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9)
     CUDA_VERSION=11.8.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -193,9 +193,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -207,9 +207,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9)
     CUDA_VERSION=12.1.1
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -221,9 +221,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -330,10 +330,10 @@ case "$image" in
     DOCS=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12)
+  pytorch-linux-jammy-cuda11.8-cudnn9-py3.8-clang12)
     ANACONDA_PYTHON_VERSION=3.8
     CUDA_VERSION=11.8
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     CLANG_VERSION=12
     PROTOBUF=yes
     DB=yes
@@ -380,7 +380,7 @@ case "$image" in
     ANACONDA_PYTHON_VERSION=3.9
     CONDA_CMAKE=yes
     ;;
-  pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter)
+  pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter)
     ANACONDA_PYTHON_VERSION=3.9
     CUDA_VERSION=11.8
     CONDA_CMAKE=yes
@@ -447,7 +447,7 @@ tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
 #when using cudnn version 8 install it separately from cuda
 if [[ "$image" == *cuda* && ${OS} == "ubuntu" ]]; then
   IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
-  if [[ ${CUDNN_VERSION} == 8 ]]; then
+  if [[ ${CUDNN_VERSION} == 9 ]]; then
     IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
   fi
 fi
@@ -499,7 +499,7 @@ docker build \
   "$@" \
   .

-# NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`,
+# NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`,
 # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
 # find the correct image. As a result, here we have to replace the
 # "$UBUNTU_VERSION" == "18.04-rc"
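The last two hunks above change the base-image selection: with CUDNN_VERSION=9 the build now starts from the plain CUDA devel image and layers cuDNN on afterwards via install_cudnn.sh. A minimal sketch of the resulting logic, with illustrative values (UBUNTU_VERSION=20.04 is an assumption, not taken from this diff):

# Sketch only: mirrors the fallback in build.sh with example values.
CUDA_VERSION=12.4.0
CUDNN_VERSION=9
UBUNTU_VERSION=20.04   # assumed for illustration
IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
if [[ ${CUDNN_VERSION} == 9 ]]; then
  # cuDNN 9 is installed later by install_cudnn.sh, so use the plain devel image
  IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
fi
echo "${IMAGE_NAME}"   # -> nvidia/cuda:12.4.0-devel-ubuntu20.04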

.ci/docker/common/install_base.sh

+1 -1

@@ -3,7 +3,7 @@
 set -ex

 install_ubuntu() {
-  # NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`,
+  # NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`,
   # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
   # find the correct image. As a result, here we have to check for
   # "$UBUNTU_VERSION" == "18.04"*

.ci/docker/common/install_cudnn.sh

+6 -11

@@ -1,23 +1,18 @@
 #!/bin/bash

-if [[ ${CUDNN_VERSION} == 8 ]]; then
+if [[ -n "${CUDNN_VERSION}" ]]; then
     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
     mkdir tmp_cudnn
     pushd tmp_cudnn
-    if [[ ${CUDA_VERSION:0:4} == "12.4" ]]; then
-        CUDNN_NAME="cudnn-linux-x86_64-8.9.7.29_cuda12-archive"
-        curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
-    elif [[ ${CUDA_VERSION:0:4} == "12.1" ]]; then
-        CUDNN_NAME="cudnn-linux-x86_64-8.9.2.26_cuda12-archive"
-        curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
-    elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then
-        CUDNN_NAME="cudnn-linux-x86_64-8.7.0.84_cuda11-archive"
-        curl --retry 3 -OLs https://developer.download.nvidia.com/compute/redist/cudnn/v8.7.0/local_installers/11.8/${CUDNN_NAME}.tar.xz
+    if [[ ${CUDA_VERSION:0:2} == "12" ]]; then
+        CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda12-archive"
+    elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then
+        CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive"
     else
         print "Unsupported CUDA version ${CUDA_VERSION}"
         exit 1
     fi
-
+    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
     tar xf ${CUDNN_NAME}.tar.xz
     cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/
     cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/
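After this simplification the script is driven only by CUDA_VERSION (which selects the cuda11 or cuda12 archive) and CUDNN_VERSION (checked only for being non-empty), and both branches share a single download URL. A hypothetical standalone invocation, with illustrative values:

# Illustrative invocation; in CI the variables come from the Docker build args.
# Expected effect: fetch cudnn-linux-x86_64-9.1.0.70_cuda12-archive.tar.xz and
# copy its headers and libraries into /usr/local/cuda.
CUDA_VERSION=12.4.0 CUDNN_VERSION=9 bash install_cudnn.sh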

.ci/docker/ubuntu-cuda/Dockerfile

+1 -1

@@ -139,7 +139,7 @@ COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
 ARG CUDNN_VERSION
 ARG CUDA_VERSION
 COPY ./common/install_cudnn.sh install_cudnn.sh
-RUN if [ "${CUDNN_VERSION}" -eq 8 ]; then bash install_cudnn.sh; fi
+RUN if [ -n "${CUDNN_VERSION}" ]; then bash install_cudnn.sh; fi
 RUN rm install_cudnn.sh

 # Install CUSPARSELT
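Replacing the numeric test with -n makes the guard version-agnostic: the installer runs whenever CUDNN_VERSION is set to anything, and an empty or unset build arg no longer trips the integer comparison. A small bash sketch of the difference (the image's /bin/sh may word the error slightly differently):

CUDNN_VERSION=""
[ "${CUDNN_VERSION}" -eq 8 ] && echo install   # errors: integer expression expected
[ -n "${CUDNN_VERSION}" ] && echo install      # quietly skips when unset or empty
CUDNN_VERSION=9
[ -n "${CUDNN_VERSION}" ] && echo install      # prints "install" for any non-empty value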

.github/scripts/generate_binary_build_matrix.py

+4 -4

@@ -19,7 +19,7 @@
 CUDA_ARCHES_FULL_VERSION = {"11.8": "11.8.0", "12.1": "12.1.1", "12.4": "12.4.0"}


-CUDA_ARCHES_CUDNN_VERSION = {"11.8": "8", "12.1": "8", "12.4": "8"}
+CUDA_ARCHES_CUDNN_VERSION = {"11.8": "9", "12.1": "9", "12.4": "9"}


 ROCM_ARCHES = ["6.0", "6.1"]
@@ -42,7 +42,7 @@
     "nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | "  # noqa: B950
     "nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-    "nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | "
+    "nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
@@ -55,7 +55,7 @@
     "nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | "  # noqa: B950
     "nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-    "nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | "
+    "nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | "
@@ -68,7 +68,7 @@
     "nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-    "nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | "
+    "nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | "
     "nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | "

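These pinned strings become the wheels' extra install requirements, so both CUDA lines now resolve to the same cuDNN release; installing the pins by hand would look like the sketch below (illustrative commands, not taken from this PR):

# cu12 builds (CUDA 12.1 and 12.4) now depend on:
pip install nvidia-cudnn-cu12==9.1.0.70
# cu11 builds (CUDA 11.8) now depend on:
pip install nvidia-cudnn-cu11==9.1.0.70
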
.github/workflows/docker-builds.yml

+9 -9

@@ -38,27 +38,27 @@ jobs:
       matrix:
         runner: [linux.12xlarge]
         docker-image-name: [
-          pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9,
-          pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks,
-          pytorch-linux-focal-cuda12.4-cudnn8-py3.12-gcc9-inductor-benchmarks,
-          pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9,
-          pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks,
-          pytorch-linux-focal-cuda12.1-cudnn8-py3.12-gcc9-inductor-benchmarks,
-          pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9,
+          pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9,
+          pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks,
+          pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks,
+          pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9,
+          pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks,
+          pytorch-linux-focal-cuda12.1-cudnn9-py3.12-gcc9-inductor-benchmarks,
+          pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9,
           pytorch-linux-focal-py3.8-clang10,
           pytorch-linux-focal-py3.11-clang10,
           pytorch-linux-focal-py3.12-clang10,
           pytorch-linux-focal-rocm-n-1-py3,
           pytorch-linux-focal-rocm-n-py3,
-          pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12,
+          pytorch-linux-jammy-cuda11.8-cudnn9-py3.8-clang12,
           pytorch-linux-focal-py3-clang9-android-ndk-r21e,
           pytorch-linux-jammy-py3.8-gcc11,
           pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks,
           pytorch-linux-jammy-xpu-2024.0-py3,
           pytorch-linux-jammy-py3-clang15-asan,
           pytorch-linux-focal-py3-clang10-onnx,
           pytorch-linux-focal-linter,
-          pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter,
+          pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter,
           pytorch-linux-jammy-py3-clang12-executorch
         ]
         include:
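Since the image names are renamed across many scripts and workflows, a grep for the old suffix is a quick way to confirm nothing was missed; a hypothetical follow-up check (not part of the commit):

# Lists any stale cudnn8 references left under the CI directories.
grep -rn "cudnn8" .ci .github || echo "no stale cudnn8 references"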
