Skip to content

support nvidia-docker2 natively in local mode. #426

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Oct 13, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions CHANGELOG.rst
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@ CHANGELOG

* feature: Local Mode: Add support for Batch Inference
* feature: Add timestamp to secondary status in training job output
* enhancement: Local Mode: support nvidia-docker2 natively


1.11.2
======
Expand Down
4 changes: 2 additions & 2 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,8 @@ def read(fname):
],

# Declare minimal set for installation
install_requires=['boto3>=1.4.8', 'numpy>=1.9.0', 'protobuf>=3.1', 'scipy>=0.19.0', 'urllib3>=1.2',
'PyYAML>=3.2', 'protobuf3-to-dict>=0.1.5'],
install_requires=['boto3>=1.4.8', 'numpy>=1.9.0', 'protobuf>=3.1', 'scipy>=0.19.0', 'urllib3 >=1.21, <1.23',
'PyYAML>=3.2', 'protobuf3-to-dict>=0.1.5', 'docker-compose>=1.21.0'],

extras_require={
'test': ['tox', 'flake8', 'pytest', 'pytest-cov', 'pytest-xdist',
Expand Down
9 changes: 7 additions & 2 deletions src/sagemaker/local/image.py
Original file line number Diff line number Diff line change
Expand Up @@ -362,8 +362,8 @@ def _generate_compose_file(self, command, additional_volumes=None, additional_en
}

content = {
# Some legacy hosts only support the 2.1 format.
'version': '2.1',
# Use version 2.3 as a minimum so that we can specify the runtime
'version': '2.3',
'services': services,
'networks': {
'sagemaker-local': {'name': 'sagemaker-local'}
Expand Down Expand Up @@ -415,6 +415,11 @@ def _create_docker_host(self, host, environment, optml_subdirs, command, volumes
}
}

# for GPU support pass in nvidia as the runtime, this is equivalent
# to setting --runtime=nvidia in the docker commandline.
if self.instance_type == 'local_gpu':
host_config['runtime'] = 'nvidia'

if command == 'serve':
serving_port = sagemaker.utils.get_config_value('local.serving_port',
self.sagemaker_session.config) or 8080
Expand Down
21 changes: 21 additions & 0 deletions tests/unit/test_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -334,6 +334,27 @@ def test_train_local_code(download_folder, _cleanup, popen, _stream_output,
assert '%s:/opt/ml/shared' % shared_folder_path in volumes


def test_container_has_gpu_support(tmpdir, sagemaker_session):
    """A 'local_gpu' container must request the nvidia docker runtime in its host config."""
    container = _SageMakerContainer('local_gpu', 1, 'my-image',
                                    sagemaker_session=sagemaker_session)

    host_config = container._create_docker_host('host-1', {}, set(), 'train', [])

    # Equivalent to passing --runtime=nvidia on the docker command line.
    assert host_config.get('runtime') == 'nvidia'


def test_container_does_not_enable_nvidia_docker_for_cpu_containers(tmpdir, sagemaker_session):
    """A plain 'local' (CPU) container must not set any docker runtime override."""
    container = _SageMakerContainer('local', 1, 'my-image',
                                    sagemaker_session=sagemaker_session)

    host_config = container._create_docker_host('host-1', {}, set(), 'train', [])

    assert 'runtime' not in host_config


@patch('sagemaker.local.image._HostingContainer.run')
@patch('shutil.copy')
@patch('shutil.copytree')
Expand Down