Skip to content

Commit 3386b92

Browse files
committed
bump to cuda 12.8 and TRT 10.8
1 parent 0a46392 commit 3386b92

16 files changed

+61
-68
lines changed

.github/scripts/generate-tensorrt-test-matrix.py

Lines changed: 12 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,9 @@
1111
# channel: nightly if the future tensorRT version test workflow is triggered from the main branch or your personal branch
1212
# channel: test if the future tensorRT version test workflow is triggered from the release branch(release/2.5 etc....)
1313
CUDA_VERSIONS_DICT = {
14-
"nightly": ["cu126"],
15-
"test": ["cu124", "cu126"],
16-
"release": ["cu124", "cu126"],
14+
"nightly": ["cu128"],
15+
"test": ["cu124", "cu126", "cu128"],
16+
"release": ["cu124", "cu126", "cu128"],
1717
}
1818

1919
# please update the python version you want to test with the future tensorRT version here
@@ -28,14 +28,6 @@
2828
# please update the future tensorRT version you want to test here
2929
TENSORRT_VERSIONS_DICT = {
3030
"windows": {
31-
"10.4.0": {
32-
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.4.0/zip/TensorRT-10.4.0.26.Windows.win10.cuda-12.6.zip",
33-
"strip_prefix": "TensorRT-10.4.0.26",
34-
},
35-
"10.5.0": {
36-
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.5.0/zip/TensorRT-10.5.0.18.Windows.win10.cuda-12.6.zip",
37-
"strip_prefix": "TensorRT-10.5.0.18",
38-
},
3931
"10.6.0": {
4032
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.6.0/zip/TensorRT-10.6.0.26.Windows.win10.cuda-12.6.zip",
4133
"strip_prefix": "TensorRT-10.6.0.26",
@@ -44,16 +36,12 @@
4436
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/zip/TensorRT-10.7.0.23.Windows.win10.cuda-12.6.zip",
4537
"strip_prefix": "TensorRT-10.7.0.23",
4638
},
39+
"10.8.0": {
40+
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.8.0/zip/TensorRT-10.8.0.43.Windows.win10.cuda-12.8.zip",
41+
"strip_prefix": "TensorRT-10.8.0.43",
42+
},
4743
},
4844
"linux": {
49-
"10.4.0": {
50-
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.4.0/tars/TensorRT-10.4.0.26.Linux.x86_64-gnu.cuda-12.6.tar.gz",
51-
"strip_prefix": "TensorRT-10.4.0.26",
52-
},
53-
"10.5.0": {
54-
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.5.0/tars/TensorRT-10.5.0.18.Linux.x86_64-gnu.cuda-12.6.tar.gz",
55-
"strip_prefix": "TensorRT-10.5.0.18",
56-
},
5745
"10.6.0": {
5846
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.6.0/tars/TensorRT-10.6.0.26.Linux.x86_64-gnu.cuda-12.6.tar.gz",
5947
"strip_prefix": "TensorRT-10.6.0.26",
@@ -62,6 +50,10 @@
6250
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/tars/TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz",
6351
"strip_prefix": "TensorRT-10.7.0.23",
6452
},
53+
"10.8.0": {
54+
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.8.0/tars/TensorRT-10.8.0.43.Linux.x86_64-gnu.cuda-12.8.tar.gz",
55+
"strip_prefix": "TensorRT-10.8.0.43",
56+
},
6557
},
6658
}
6759

@@ -87,7 +79,7 @@ def check_file_availability(url: str) -> bool:
8779
# calculate the next minor version
8880
minor = int(list(TENSORRT_VERSIONS_DICT["linux"].keys())[-1].split(".")[1]) + 1
8981
trt_version = f"{major}.{minor}.0"
90-
for patch in range(patch_from, 50):
82+
for patch in range(patch_from, 80):
9183
for cuda_minor in range(4, 11):
9284
trt_linux_release_url_candidate = f"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/{trt_version}/tars/TensorRT-{trt_version}.{patch}.Linux.x86_64-gnu.cuda-12.{cuda_minor}.tar.gz"
9385
if check_file_availability(trt_linux_release_url_candidate):

.github/scripts/generate_binary_build_matrix.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
"release": ["3.9", "3.10", "3.11", "3.12"],
2525
}
2626
CUDA_ARCHES_DICT = {
27-
"nightly": ["11.8", "12.4", "12.6"],
27+
"nightly": ["11.8", "12.4", "12.6", "12.8"],
2828
"test": ["11.8", "12.1", "12.4"],
2929
"release": ["11.8", "12.1", "12.4"],
3030
}
@@ -34,12 +34,12 @@
3434
"release": ["6.1", "6.2"],
3535
}
3636

37-
CUDA_CUDDN_VERSIONS = {
38-
"11.8": {"cuda": "11.8.0", "cudnn": "9"},
39-
"12.1": {"cuda": "12.1.1", "cudnn": "9"},
40-
"12.4": {"cuda": "12.4.1", "cudnn": "9"},
41-
"12.6": {"cuda": "12.6.2", "cudnn": "9"},
42-
}
37+
# CUDA_CUDDN_VERSIONS = {
38+
# "11.8": {"cuda": "11.8.0", "cudnn": "9"},
39+
# "12.1": {"cuda": "12.1.1", "cudnn": "9"},
40+
# "12.4": {"cuda": "12.4.1", "cudnn": "9"},
41+
# "12.6": {"cuda": "12.6.2", "cudnn": "9"},
42+
# }
4343

4444
PACKAGE_TYPES = ["wheel", "conda", "libtorch"]
4545
PRE_CXX11_ABI = "pre-cxx11"
@@ -151,6 +151,7 @@ def initialize_globals(channel: str, build_python_only: bool) -> None:
151151
"12.1": "pytorch/manylinux2_28-builder:cuda12.1",
152152
"12.4": "pytorch/manylinux2_28-builder:cuda12.4",
153153
"12.6": "pytorch/manylinux2_28-builder:cuda12.6",
154+
"12.8": "pytorch/manylinux2_28-builder:cuda12.8",
154155
**{
155156
gpu_arch: f"pytorch/manylinux2_28-builder:rocm{gpu_arch}"
156157
for gpu_arch in ROCM_ARCHES
@@ -278,7 +279,7 @@ def get_wheel_install_command(
278279
return f"{WHL_INSTALL_BASE} {PACKAGES_TO_INSTALL_WHL} --index-url {get_base_download_url_for_repo('whl', channel, gpu_arch_type, desired_cuda)}_pypi_pkg" # noqa: E501
279280
else:
280281
raise ValueError(
281-
"Split build is not supported for this configuration. It is only supported for CUDA 11.8, 12.4, 12.6 on Linux nightly builds." # noqa: E501
282+
"Split build is not supported for this configuration. It is only supported for CUDA 11.8, 12.4, 12.6, 12.8 on Linux nightly builds." # noqa: E501
282283
)
283284
if (
284285
channel == RELEASE

.github/workflows/docgen.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,10 +14,10 @@ jobs:
1414
if: ${{ ! contains(github.actor, 'pytorchbot') }}
1515
environment: pytorchbot-env
1616
container:
17-
image: docker.io/pytorch/manylinux2_28-builder:cuda12.6
17+
image: docker.io/pytorch/manylinux2_28-builder:cuda12.8
1818
options: --gpus all
1919
env:
20-
CUDA_HOME: /usr/local/cuda-12.6
20+
CUDA_HOME: /usr/local/cuda-12.8
2121
VERSION_SUFFIX: cu126
2222
CU_VERSION: cu126
2323
CHANNEL: nightly

MODULE.bazel

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -36,13 +36,13 @@ new_local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.
3636
new_local_repository(
3737
name = "cuda",
3838
build_file = "@//third_party/cuda:BUILD",
39-
path = "/usr/local/cuda-12.6/",
39+
path = "/usr/local/cuda-12.8/",
4040
)
4141

4242
new_local_repository(
4343
name = "cuda_win",
4444
build_file = "@//third_party/cuda:BUILD",
45-
path = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.6/",
45+
path = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.8/",
4646
)
4747

4848
http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
@@ -79,18 +79,18 @@ http_archive(
7979
http_archive(
8080
name = "tensorrt",
8181
build_file = "@//third_party/tensorrt/archive:BUILD",
82-
strip_prefix = "TensorRT-10.7.0.23",
82+
strip_prefix = "TensorRT-10.8.0.43",
8383
urls = [
84-
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/tars/TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz",
84+
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.8.0/tars/TensorRT-10.8.0.43.Linux.x86_64-gnu.cuda-12.8.tar.gz",
8585
],
8686
)
8787

8888
http_archive(
8989
name = "tensorrt_win",
9090
build_file = "@//third_party/tensorrt/archive:BUILD",
91-
strip_prefix = "TensorRT-10.7.0.23",
91+
strip_prefix = "TensorRT-10.8.0.43",
9292
urls = [
93-
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/zip/TensorRT-10.7.0.23.Windows.win10.cuda-12.6.zip",
93+
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.8.0/zip/TensorRT-10.8.0.43.Windows.win10.cuda-12.8.zip",
9494
],
9595
)
9696

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ Torch-TensorRT
77
[![Documentation](https://img.shields.io/badge/docs-master-brightgreen)](https://nvidia.github.io/Torch-TensorRT/)
88
[![pytorch](https://img.shields.io/badge/PyTorch-2.4-green)](https://www.python.org/downloads/release/python-31013/)
99
[![cuda](https://img.shields.io/badge/CUDA-12.4-green)](https://developer.nvidia.com/cuda-downloads)
10-
[![trt](https://img.shields.io/badge/TensorRT-10.7.0-green)](https://github.com/nvidia/tensorrt-llm)
10+
[![trt](https://img.shields.io/badge/TensorRT-10.8.0-green)](https://github.com/nvidia/tensorrt-llm)
1111
[![license](https://img.shields.io/badge/license-BSD--3--Clause-blue)](./LICENSE)
1212
[![linux_tests](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux.yml/badge.svg)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux.yml)
1313
[![windows_tests](https://github.com/pytorch/TensorRT/actions/workflows/build-test-windows.yml/badge.svg)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-windows.yml)
@@ -119,7 +119,7 @@ These are the following dependencies used to verify the testcases. Torch-TensorR
119119
- Bazel 6.3.2
120120
- Libtorch 2.5.0.dev (latest nightly) (built with CUDA 12.4)
121121
- CUDA 12.4
122-
- TensorRT 10.7.0.23
122+
- TensorRT 10.8.0.43
123123
124124
## Deprecation Policy
125125

dev_dep_versions.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
1-
__cuda_version__: "12.6"
2-
__tensorrt_version__: "10.7.0.post1"
1+
__cuda_version__: "12.8"
2+
__tensorrt_version__: "10.8.0"

docker/README.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
* [**Optional**] The desired base image be changed by explicitly setting a base image, as in `--build-arg BASE_IMG=nvidia/cuda:11.8.0-devel-ubuntu22.04`, though this is optional.
88
* [**Optional**] Additionally, the desired Python version can be changed by explicitly setting a version, as in `--build-arg PYTHON_VERSION=3.10`, though this is optional as well.
99

10-
* This `Dockerfile` installs `cxx11-abi` versions of Pytorch and builds Torch-TRT using `cxx11-abi` libtorch as well. As of torch 2.7, torch requires `cxx11-abi` for all CUDA 11.8, 12.4, and 12.6.
10+
* This `Dockerfile` installs `cxx11-abi` versions of Pytorch and builds Torch-TRT using `cxx11-abi` libtorch as well. As of torch 2.7, torch requires `cxx11-abi` for all CUDA 11.8, 12.4, 12.6, and later versions.
1111

1212
Note: By default the container uses the `cxx11-abi` version of Torch + Torch-TRT. If you are using a workflow that requires a build of PyTorch on the PRE CXX11 ABI, please add the Docker build argument: `--build-arg USE_PRE_CXX11_ABI=1`
1313

@@ -17,14 +17,14 @@ Note: By default the container uses the `cxx11-abi` version of Torch + Torch-TRT
1717

1818
### Instructions
1919

20-
- The example below uses TensorRT 10.7.0.23
20+
- The example below uses TensorRT 10.8.0.43
2121
- See <a href="https://github.com/pytorch/TensorRT#dependencies">dependencies</a> for a list of current default dependencies.
2222

2323
> From root of Torch-TensorRT repo
2424
2525
Build:
2626
```
27-
DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=10.7.0 -f docker/Dockerfile -t torch_tensorrt:latest .
27+
DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=10.8.0 -f docker/Dockerfile -t torch_tensorrt:latest .
2828
```
2929

3030
Run:

examples/dynamo/auto_generate_converters.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
===================================================================
66
77
We are going to demonstrate how to automatically generate a converter for a custom kernel using Torch-TensorRT using
8-
the new Python based plugin system in TensorRT 10.7.
8+
the new Python based plugin system in TensorRT 10.8.
99
1010
Torch-TensorRT supports falling back to PyTorch implementations of operations in the case that Torch-TensorRT
1111
does not know how to compile them in TensorRT. However, this comes at the cost of a graph break and will reduce the performance of the model.
@@ -21,7 +21,7 @@
2121
the performance and resource overhead from a graph break.
2222
2323
Previously this involved a complex process in not only building a performant kernel but setting it up to run in TensorRT (see: `Using Custom Kernels within TensorRT Engines with Torch-TensorRT <https://pytorch.org/TensorRT/tutorials/_rendered_examples/dynamo/custom_kernel_plugins.html>`_).
24-
With TensorRT 10.7, there is a new Python native plugin system which greatly streamlines this process. This
24+
With TensorRT 10.8, there is a new Python native plugin system which greatly streamlines this process. This
2525
plugin system also allows Torch-TensorRT to automatically generate the necessary conversion code to convert the
2626
operation in PyTorch to TensorRT.
2727
"""
@@ -99,7 +99,7 @@ def _(x: torch.Tensor, y: torch.Tensor, b: float = 0.2, a: int = 2) -> torch.Ten
9999
# %%
100100
# Writing Plugins for TensorRT using the Quick Deploy Plugin system
101101
# -------------------------------------------------------------------
102-
# The quick deployment plugin system in TensorRT 10.7 allows for the creation of custom plugins in Python with significantly
102+
# The quick deployment plugin system in TensorRT 10.8 allows for the creation of custom plugins in Python with significantly
103103
# less boilerplate. It uses a similar system PyTorch where you define a function that describes the shape and data type transformations
104104
# that the operator will perform and then define the code to launch the kernel given GPU memory handles.
105105
#

packaging/pre_build_script.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ export TORCH_INSTALL_PATH=$(python -c "import torch, os; print(os.path.dirname(t
2424
if [[ ${TENSORRT_VERSION} != "" ]]; then
2525
# this is the upgraded TensorRT version, replace current tensorrt version to the upgrade tensorRT version in the pyproject.toml
2626
# example: __tensorrt_version__: ">=10.3.0,<=10.6.0"
27-
# replace: tensorrt-cu12>=10.3.0,<=10.6.0 to tensorrt-cu12==10.7.0
27+
# replace: tensorrt-cu12>=10.3.0,<=10.6.0 to tensorrt-cu12==10.8.0
2828
current_version=$(cat dev_dep_versions.yml | grep __tensorrt_version__ | sed 's/__tensorrt_version__: //g' | sed 's/"//g')
2929
sed -i -e "s/tensorrt-cu12${current_version}/tensorrt-cu12==${TENSORRT_VERSION}/g" \
3030
-e "s/tensorrt-cu12-bindings${current_version}/tensorrt-cu12-bindings==${TENSORRT_VERSION}/g" \

packaging/pre_build_script_windows.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ echo TENSORRT_VERSION=${TENSORRT_VERSION}
99
if [[ ${TENSORRT_VERSION} != "" ]]; then
1010
# this is the upgraded TensorRT version, replace current tensorrt version to the upgrade tensorRT version in the pyproject.toml
1111
# example: __tensorrt_version__: ">=10.3.0,<=10.6.0"
12-
# replace: tensorrt-cu12>=10.3.0,<=10.6.0 to tensorrt-cu12==10.7.0
12+
# replace: tensorrt-cu12>=10.3.0,<=10.6.0 to tensorrt-cu12==10.8.0
1313
current_version=$(cat dev_dep_versions.yml | grep __tensorrt_version__ | sed 's/__tensorrt_version__: //g' | sed 's/"//g')
1414
sed -i -e "s/tensorrt-cu12${current_version}/tensorrt-cu12==${TENSORRT_VERSION}/g" \
1515
-e "s/tensorrt-cu12-bindings${current_version}/tensorrt-cu12-bindings==${TENSORRT_VERSION}/g" \

packaging/smoke_test_script.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,5 +2,5 @@
22
# The issue was smoke test installs the built torch_tensorrt wheel file and checks `import torch_tensorrt; print(torch_tensorrt.__version__)`
33
# Since tensorrt cannot be pip installable in CI, the smoke test will fail.
44
# One way we tried to handle it is manually install tensorrt wheel while by extracting from the tarball.
5-
# However, the TensorRT-10.7.0.23/lib path doesn't seem to show up in LD_LIBRARY_PATH even if we explicitly set it.
5+
# However, the TensorRT-10.8.0.43/lib path doesn't seem to show up in LD_LIBRARY_PATH even if we explicitly set it.
66
# TODO: Implement a custom smoke_test script to verify torch_tensorrt installation.

py/ci/Dockerfile.ci

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,15 @@
1-
FROM pytorch/manylinux2_28-builder:cuda12.6
1+
FROM pytorch/manylinux2_28-builder:cuda12.8
22

33
RUN yum install -y ninja-build
44

55
# download TensorRT tarball
6-
RUN wget -q https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/tars/TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz \
7-
&& gunzip TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz \
8-
&& tar -xvf TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar \
9-
&& rm TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar
6+
RUN wget -q https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.8.0/tars/TensorRT-10.8.0.43.Linux.x86_64-gnu.cuda-12.8.tar.gz \
7+
&& gunzip TensorRT-10.8.0.43.Linux.x86_64-gnu.cuda-12.8.tar.gz \
8+
&& tar -xvf TensorRT-10.8.0.43.Linux.x86_64-gnu.cuda-12.8.tar \
9+
&& rm TensorRT-10.8.0.43.Linux.x86_64-gnu.cuda-12.8.tar
1010

11-
ENV TENSORRT_DIR=/TensorRT-10.7.0.23
12-
ENV TENSORRT_VERSION=10.7.0
11+
ENV TENSORRT_DIR=/TensorRT-10.8.0.43
12+
ENV TENSORRT_VERSION=10.8.0
1313

1414
RUN wget https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 \
1515
&& mv bazelisk-linux-amd64 /usr/bin/bazel \

pyproject.toml

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ requires = [
88
"cffi>=1.15.1",
99
"typing-extensions>=4.7.0",
1010
"future>=0.18.3",
11-
"tensorrt-cu12>=10.7.0.post1,<10.8.0",
11+
"tensorrt-cu12>=10.8.0,<10.9.0",
1212
"torch>=2.7.0.dev,<2.8.0",
1313
"pybind11==2.6.2",
1414
"numpy",
@@ -55,10 +55,10 @@ keywords = [
5555
]
5656
dependencies = [
5757
"torch>=2.7.0.dev,<2.8.0",
58-
"tensorrt>=10.7.0.post1,<10.8.0",
59-
"tensorrt-cu12>=10.7.0.post1,<10.8.0",
60-
"tensorrt-cu12-bindings>=10.7.0,<10.8.0",
61-
"tensorrt-cu12-libs>=10.7.0,<10.8.0",
58+
"tensorrt>=10.8.0,<10.9.0",
59+
"tensorrt-cu12>=10.8.0,<10.9.0",
60+
"tensorrt-cu12-bindings>=10.8.0,<10.9.0",
61+
"tensorrt-cu12-libs>=10.8.0,<10.9.0",
6262
"packaging>=23",
6363
"numpy",
6464
"typing-extensions>=4.7.0",

toolchains/ci_workspaces/MODULE.bazel.tmpl

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -67,18 +67,18 @@ http_archive(
6767
http_archive(
6868
name = "tensorrt",
6969
build_file = "@//third_party/tensorrt/archive:BUILD",
70-
strip_prefix = "TensorRT-10.7.0.23",
70+
strip_prefix = "TensorRT-10.8.0.43",
7171
urls = [
72-
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/tars/TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz",
72+
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.8.0/tars/TensorRT-10.8.0.43.Linux.x86_64-gnu.cuda-12.8.tar.gz",
7373
],
7474
)
7575

7676
http_archive(
7777
name = "tensorrt_win",
7878
build_file = "@//third_party/tensorrt/archive:BUILD",
79-
strip_prefix = "TensorRT-10.7.0.23",
79+
strip_prefix = "TensorRT-10.8.0.43",
8080
urls = [
81-
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/zip/TensorRT-10.7.0.23.Windows.win10.cuda-12.6.zip",
81+
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.8.0/zip/TensorRT-10.8.0.43.Windows.win10.cuda-12.8.zip",
8282
],
8383
)
8484

toolchains/legacy/WORKSPACE.win.release.tmpl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ http_archive(
6363

6464
new_local_repository(
6565
name = "tensorrt_win",
66-
path = "C:/TensorRT-10.7.0.23",
66+
path = "C:/TensorRT-10.8.0.43",
6767
build_file = "@//third_party/tensorrt/local:BUILD"
6868
)
6969

toolchains/legacy/WORKSPACE.x86_64.release.rhel.tmpl

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -71,9 +71,9 @@ http_archive(
7171
http_archive(
7272
name = "tensorrt",
7373
build_file = "@//third_party/tensorrt/archive:BUILD",
74-
strip_prefix = "TensorRT-10.7.0.23",
74+
strip_prefix = "TensorRT-10.8.0.43",
7575
urls = [
76-
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/tars/TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz",
76+
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.8.0/tars/TensorRT-10.8.0.43.Linux.x86_64-gnu.cuda-12.8.tar.gz",
7777
],
7878
)
7979

0 commit comments

Comments (0)