Skip to content

Commit c6919f4

Browse files
Add support for JetPack 6.1 build (#3211)
1 parent 0c060d1 commit c6919f4

File tree

8 files changed

+216
-3
lines changed

8 files changed

+216
-3
lines changed

docsrc/getting_started/jetpack.rst

+119
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
.. _Torch_TensorRT_in_JetPack_6.1:
2+
3+
Overview
4+
##################
5+
6+
JetPack 6.1
7+
---------------------
8+
NVIDIA JetPack 6.1 is the latest production release of JetPack 6.
9+
This release incorporates:
10+
CUDA 12.6
11+
TensorRT 10.3
12+
cuDNN 9.3
13+
DLFW 24.09
14+
15+
You can find more details about JetPack 6.1 at:
16+
17+
* https://docs.nvidia.com/jetson/jetpack/release-notes/index.html
18+
* https://docs.nvidia.com/deeplearning/frameworks/install-pytorch-jetson-platform/index.html
19+
20+
21+
Prerequisites
22+
~~~~~~~~~~~~~~
23+
24+
25+
Ensure your Jetson developer kit has been flashed with the latest JetPack 6.1. You can find more details on how to flash the Jetson board via SDK Manager:
26+
27+
* https://developer.nvidia.com/sdk-manager
28+
29+
30+
Check the current JetPack version using:
31+
32+
.. code-block:: sh
33+
34+
apt show nvidia-jetpack
35+
36+
Ensure you have installed the JetPack Dev components. This step is required if you need to build on the Jetson board.
37+
38+
You can install only the dev components that you require: e.g., ``tensorrt-dev`` is the meta-package for all TensorRT development; alternatively, install everything.
39+
40+
.. code-block:: sh
41+
# install all the nvidia-jetpack dev components
42+
sudo apt-get update
43+
sudo apt-get install nvidia-jetpack
44+
45+
Ensure you have CUDA 12.6 installed (this should be installed automatically from nvidia-jetpack):
46+
47+
.. code-block:: sh
48+
49+
# check the cuda version
50+
nvcc --version
51+
# if not installed or the version is not 12.6, install via the below cmd:
52+
sudo apt-get update
53+
sudo apt-get install cuda-toolkit-12-6
54+
55+
Ensure libcusparseLt.so exists at /usr/local/cuda/lib64/:
56+
57+
.. code-block:: sh
58+
59+
# if not exist, download and copy to the directory
60+
wget https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-sbsa/libcusparse_lt-linux-sbsa-0.5.2.1-archive.tar.xz
61+
tar xf libcusparse_lt-linux-sbsa-0.5.2.1-archive.tar.xz
62+
sudo cp -a libcusparse_lt-linux-sbsa-0.5.2.1-archive/include/* /usr/local/cuda/include/
63+
sudo cp -a libcusparse_lt-linux-sbsa-0.5.2.1-archive/lib/* /usr/local/cuda/lib64/
64+
65+
66+
Build torch_tensorrt
67+
~~~~~~~~~~~~~~~~~~~~
68+
69+
70+
Install bazel
71+
72+
.. code-block:: sh
73+
74+
wget -v https://github.com/bazelbuild/bazelisk/releases/download/v1.20.0/bazelisk-linux-arm64
75+
sudo mv bazelisk-linux-arm64 /usr/bin/bazel
76+
chmod +x /usr/bin/bazel
77+
78+
Install pip and required python packages:
79+
* https://pip.pypa.io/en/stable/installation/
80+
81+
.. code-block:: sh
82+
83+
# install pip
84+
wget https://bootstrap.pypa.io/get-pip.py
85+
python get-pip.py
86+
87+
.. code-block:: sh
88+
89+
# install pytorch from nvidia jetson distribution: https://developer.download.nvidia.com/compute/redist/jp/v61/pytorch
90+
python -m pip install torch https://developer.download.nvidia.com/compute/redist/jp/v61/pytorch/torch-2.5.0a0+872d972e41.nv24.08.17622132-cp310-cp310-linux_aarch64.whl
91+
92+
.. code-block:: sh
93+
94+
# install required python packages
95+
python -m pip install -r toolchains/jp_workspaces/requirements.txt
96+
97+
# if you want to run the test cases, then install the test required python packages
98+
python -m pip install -r toolchains/jp_workspaces/test_requirements.txt
99+
100+
101+
Build and Install torch_tensorrt wheel file
102+
103+
104+
Since the torch_tensorrt version depends on the torch version, and the torch version supported by JetPack 6.1 comes from DLFW 24.08/24.09 (torch 2.5.0):
105+
106+
Please make sure to build the torch_tensorrt wheel file from the source release/2.5 branch
107+
(TODO: lanl to update the branch name once release/ngc branch is available)
108+
109+
.. code-block:: sh
110+
111+
cuda_version=$(nvcc --version | grep Cuda | grep release | cut -d ',' -f 2 | sed -e 's/ release //g')
112+
export TORCH_INSTALL_PATH=$(python -c "import torch, os; print(os.path.dirname(torch.__file__))")
113+
export SITE_PACKAGE_PATH=${TORCH_INSTALL_PATH::-6}
114+
export CUDA_HOME=/usr/local/cuda-${cuda_version}/
115+
# replace the MODULE.bazel with the jetpack one
116+
cat toolchains/jp_workspaces/MODULE.bazel.tmpl | envsubst > MODULE.bazel
117+
# build and install torch_tensorrt wheel file
118+
python setup.py --use-cxx11-abi install --user
119+

docsrc/index.rst

+1
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ Getting Started
2626
:hidden:
2727

2828
getting_started/installation
29+
getting_started/jetpack
2930
getting_started/quick_start
3031

3132
User Guide

setup.py

+8-3
Original file line numberDiff line numberDiff line change
@@ -156,12 +156,14 @@ def load_dep_info():
156156
JETPACK_VERSION = "4.6"
157157
elif version == "5.0":
158158
JETPACK_VERSION = "5.0"
159+
elif version == "6.1":
160+
JETPACK_VERSION = "6.1"
159161

160162
if not JETPACK_VERSION:
161163
warnings.warn(
162-
"Assuming jetpack version to be 5.0, if not use the --jetpack-version option"
164+
"Assuming jetpack version to be 6.1, if not use the --jetpack-version option"
163165
)
164-
JETPACK_VERSION = "5.0"
166+
JETPACK_VERSION = "6.1"
165167

166168
if not CXX11_ABI:
167169
warnings.warn(
@@ -213,12 +215,15 @@ def build_libtorchtrt_pre_cxx11_abi(
213215
elif JETPACK_VERSION == "5.0":
214216
cmd.append("--platforms=//toolchains:jetpack_5.0")
215217
print("Jetpack version: 5.0")
218+
elif JETPACK_VERSION == "6.1":
219+
cmd.append("--platforms=//toolchains:jetpack_6.1")
220+
print("Jetpack version: 6.1")
216221

217222
if CI_BUILD:
218223
cmd.append("--platforms=//toolchains:ci_rhel_x86_64_linux")
219224
print("CI based build")
220225

221-
print("building libtorchtrt")
226+
print(f"building libtorchtrt {cmd=}")
222227
status_code = subprocess.run(cmd).returncode
223228

224229
if status_code != 0:

toolchains/BUILD

+9
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,15 @@ platform(
3535
],
3636
)
3737

38+
platform(
39+
name = "jetpack_6.1",
40+
constraint_values = [
41+
"@platforms//os:linux",
42+
"@platforms//cpu:aarch64",
43+
"@//toolchains/jetpack:6.1",
44+
],
45+
)
46+
3847
platform(
3948
name = "ci_rhel_x86_64_linux",
4049
constraint_values = [

toolchains/jetpack/BUILD

+5
Original file line numberDiff line numberDiff line change
@@ -11,3 +11,8 @@ constraint_value(
1111
name = "4.6",
1212
constraint_setting = ":jetpack",
1313
)
14+
15+
constraint_value(
16+
name = "6.1",
17+
constraint_setting = ":jetpack",
18+
)
+61
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
1+
module(
2+
name = "torch_tensorrt",
3+
repo_name = "org_pytorch_tensorrt",
4+
version = "${BUILD_VERSION}"
5+
)
6+
7+
bazel_dep(name = "googletest", version = "1.14.0")
8+
bazel_dep(name = "platforms", version = "0.0.10")
9+
bazel_dep(name = "rules_cc", version = "0.0.9")
10+
bazel_dep(name = "rules_python", version = "0.34.0")
11+
12+
python = use_extension("@rules_python//python/extensions:python.bzl", "python")
13+
python.toolchain(
14+
ignore_root_user_error = True,
15+
python_version = "3.11",
16+
)
17+
18+
bazel_dep(name = "rules_pkg", version = "1.0.1")
19+
git_override(
20+
module_name = "rules_pkg",
21+
commit = "17c57f4",
22+
remote = "https://github.com/narendasan/rules_pkg",
23+
)
24+
25+
local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "local_repository")
26+
27+
# External dependency for torch_tensorrt if you already have precompiled binaries.
28+
local_repository(
29+
name = "torch_tensorrt",
30+
path = "${SITE_PACKAGE_PATH}/torch_tensorrt",
31+
)
32+
33+
34+
new_local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "new_local_repository")
35+
36+
# CUDA should be installed on the system locally
37+
new_local_repository(
38+
name = "cuda",
39+
build_file = "@//third_party/cuda:BUILD",
40+
path = "${CUDA_HOME}",
41+
)
42+
43+
new_local_repository(
44+
name = "libtorch",
45+
path = "${TORCH_INSTALL_PATH}",
46+
build_file = "third_party/libtorch/BUILD",
47+
)
48+
49+
new_local_repository(
50+
name = "libtorch_pre_cxx11_abi",
51+
path = "${TORCH_INSTALL_PATH}",
52+
build_file = "third_party/libtorch/BUILD"
53+
)
54+
55+
new_local_repository(
56+
name = "tensorrt",
57+
path = "/usr/",
58+
build_file = "@//third_party/tensorrt/local:BUILD"
59+
)
60+
61+
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
setuptools==70.2.0
2+
numpy<2.0.0
3+
packaging
4+
pyyaml
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
expecttest==0.1.6
2+
networkx==2.8.8
3+
numpy<2.0.0
4+
parameterized>=0.2.0
5+
pytest>=8.2.1
6+
pytest-xdist>=3.6.1
7+
pyyaml
8+
transformers
9+
# TODO: currently timm torchvision nvidia-modelopt does not have distributions for jetson

0 commit comments

Comments
 (0)