
Commit a84c10b

Merge branch 'master' into augmented-manifest-file
2 parents: ee496cf + c042a6c

26 files changed: +915 -304 lines changed

CHANGELOG.rst

Lines changed: 9 additions & 2 deletions
@@ -2,9 +2,15 @@
 CHANGELOG
 =========
 
-1.16.2
-======
+1.16.2.dev
+==========
+
 * feature: Add support for AugmentedManifestFile and ShuffleConfig
+* bug-fix: add version bound for requests module to avoid version conflicts between docker-compose and docker-py
+* bug-fix: Remove unnecessary dependency tensorflow
+* doc-fix: Change ``distribution`` to ``distributions``
+* bug-fix: Increase docker-compose http timeout and health check timeout to 120.
+* feature: Local Mode: Add support for intermediate output to a local directory.
 
 1.16.1.post1
 ============
@@ -30,6 +36,7 @@ CHANGELOG
 * feature: Add support for SageMaker Inference Pipelines
 * feature: Add support for SparkML serving container
 
+
 1.15.2
 ======
 

setup.py

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@ def read(fname):
 # Declare minimal set for installation
 required_packages = ['boto3>=1.9.55', 'numpy>=1.9.0', 'protobuf>=3.1', 'scipy>=0.19.0',
                      'urllib3>=1.21', 'PyYAML>=3.2, <4', 'protobuf3-to-dict>=0.1.5',
-                     'docker-compose>=1.23.0']
+                     'docker-compose>=1.23.0', 'requests>=2.14.2, !=2.18.0, <2.21']
 
 # enum is introduced in Python 3.4. Installing enum back port
 if sys.version_info < (3, 4):
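
The new ``requests`` pin keeps pip from resolving a version that docker-compose accepts but docker-py rejects. As a quick sanity check (not part of this commit), the installed version can be tested against the bound with ``pkg_resources``, which ships with setuptools:

    import pkg_resources

    spec = pkg_resources.Requirement.parse('requests>=2.14.2,!=2.18.0,<2.21')
    installed = pkg_resources.get_distribution('requests').version
    print(installed, installed in spec)  # e.g. '2.20.1 True'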

src/sagemaker/fw_utils.py

Lines changed: 5 additions & 3 deletions
@@ -219,9 +219,11 @@ def framework_name_from_image(image_name):
     else:
         # extract framework, python version and image tag
         # We must support both the legacy and current image name format.
-        name_pattern = \
-            re.compile('^sagemaker(?:-rl)?-(tensorflow|mxnet|chainer|pytorch|scikit-learn):(.*)-(.*?)-(py2|py3)$')
-        legacy_name_pattern = re.compile('^sagemaker-(tensorflow|mxnet)-(py2|py3)-(cpu|gpu):(.*)$')
+        name_pattern = re.compile(
+            r'^sagemaker(?:-rl)?-(tensorflow|mxnet|chainer|pytorch|scikit-learn):(.*)-(.*?)-(py2|py3)$')
+        legacy_name_pattern = re.compile(
+            r'^sagemaker-(tensorflow|mxnet)-(py2|py3)-(cpu|gpu):(.*)$')
+
         name_match = name_pattern.match(sagemaker_match.group(8))
         legacy_match = legacy_name_pattern.match(sagemaker_match.group(8))
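
For reference, the two patterns split an image's repo:tag string into framework, version tag, device, and Python version fields. A small standalone check (illustrative image names, not from this commit):

    import re

    name_pattern = re.compile(
        r'^sagemaker(?:-rl)?-(tensorflow|mxnet|chainer|pytorch|scikit-learn):(.*)-(.*?)-(py2|py3)$')
    legacy_name_pattern = re.compile(
        r'^sagemaker-(tensorflow|mxnet)-(py2|py3)-(cpu|gpu):(.*)$')

    # Current naming: sagemaker-<framework>:<tag>-<device>-<py version>
    print(name_pattern.match('sagemaker-mxnet:1.3.0-gpu-py3').groups())
    # ('mxnet', '1.3.0', 'gpu', 'py3')

    # Legacy naming: sagemaker-<framework>-<py version>-<device>:<tag>
    print(legacy_name_pattern.match('sagemaker-tensorflow-py2-cpu:1.0').groups())
    # ('tensorflow', 'py2', 'cpu', '1.0')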

src/sagemaker/local/entities.py

Lines changed: 3 additions & 3 deletions
@@ -29,7 +29,7 @@
 logger.setLevel(logging.WARNING)
 
 _UNUSED_ARN = 'local:arn-does-not-matter'
-HEALTH_CHECK_TIMEOUT_LIMIT = 30
+HEALTH_CHECK_TIMEOUT_LIMIT = 120
 
 
 class _LocalTrainingJob(object):
@@ -405,7 +405,7 @@ def _wait_for_serving_container(serving_port):
 
     endpoint_url = 'http://localhost:%s/ping' % serving_port
     while True:
-        i += 1
+        i += 5
         if i >= HEALTH_CHECK_TIMEOUT_LIMIT:
             raise RuntimeError('Giving up, endpoint didn\'t launch correctly')
 
@@ -416,7 +416,7 @@ def _wait_for_serving_container(serving_port):
         else:
             return
 
-        time.sleep(1)
+        time.sleep(5)
 
 
 def _perform_request(endpoint_url, pool_manager=None):
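
With the counter advancing by 5 per attempt and a 5-second sleep, the loop still gives up after roughly HEALTH_CHECK_TIMEOUT_LIMIT (now 120) seconds, but pings every 5 seconds instead of every second. A simplified, self-contained sketch of the loop's shape after this change (error handling trimmed; ``wait_for_ping`` is an illustrative name, not the SDK's):

    import time
    import urllib3

    HEALTH_CHECK_TIMEOUT_LIMIT = 120

    def wait_for_ping(serving_port):
        i = 0
        http = urllib3.PoolManager()
        endpoint_url = 'http://localhost:%s/ping' % serving_port
        while True:
            i += 5  # i now approximates elapsed seconds
            if i >= HEALTH_CHECK_TIMEOUT_LIMIT:
                raise RuntimeError('Giving up, endpoint didn\'t launch correctly')
            try:
                if http.request('GET', endpoint_url).status == 200:
                    return  # container is healthy
            except Exception:
                pass  # container not accepting connections yet
            time.sleep(5)  # poll every 5 seconds instead of every 1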

src/sagemaker/local/image.py

Lines changed: 18 additions & 2 deletions
@@ -39,6 +39,9 @@
 
 CONTAINER_PREFIX = 'algo'
 DOCKER_COMPOSE_FILENAME = 'docker-compose.yaml'
+DOCKER_COMPOSE_HTTP_TIMEOUT_ENV = 'COMPOSE_HTTP_TIMEOUT'
+DOCKER_COMPOSE_HTTP_TIMEOUT = '120'
+
 
 # Environment variables to be set during training
 REGION_ENV_NAME = 'AWS_REGION'
@@ -101,7 +104,8 @@ def train(self, input_data_config, output_data_config, hyperparameters, job_name
         os.mkdir(shared_dir)
 
         data_dir = self._create_tmp_folder()
-        volumes = self._prepare_training_volumes(data_dir, input_data_config, hyperparameters)
+        volumes = self._prepare_training_volumes(data_dir, input_data_config, output_data_config,
+                                                 hyperparameters)
 
         # Create the configuration files for each container that we will create
         # Each container will map the additional local volumes (if any).
@@ -278,7 +282,8 @@ def write_config_files(self, host, hyperparameters, input_data_config):
         _write_json_file(os.path.join(config_path, 'resourceconfig.json'), resource_config)
         _write_json_file(os.path.join(config_path, 'inputdataconfig.json'), json_input_data_config)
 
-    def _prepare_training_volumes(self, data_dir, input_data_config, hyperparameters):
+    def _prepare_training_volumes(self, data_dir, input_data_config, output_data_config,
+                                  hyperparameters):
         shared_dir = os.path.join(self.container_root, 'shared')
         model_dir = os.path.join(self.container_root, 'model')
         volumes = []
@@ -306,6 +311,14 @@ def _prepare_training_volumes(self, data_dir, input_data_config, hyperparameters
         # Also mount a directory that all the containers can access.
         volumes.append(_Volume(shared_dir, '/opt/ml/shared'))
 
+        parsed_uri = urlparse(output_data_config['S3OutputPath'])
+        if parsed_uri.scheme == 'file' \
+                and sagemaker.rl.estimator.SAGEMAKER_OUTPUT_LOCATION in hyperparameters:
+            intermediate_dir = os.path.join(parsed_uri.path, 'output', 'intermediate')
+            if not os.path.exists(intermediate_dir):
+                os.makedirs(intermediate_dir)
+            volumes.append(_Volume(intermediate_dir, '/opt/ml/output/intermediate'))
+
         return volumes
 
     def _prepare_serving_volumes(self, model_location):
@@ -359,6 +372,9 @@ def _generate_compose_file(self, command, additional_volumes=None, additional_en
         additional_env_var_list = ['{}={}'.format(k, v) for k, v in additional_env_vars.items()]
         environment.extend(additional_env_var_list)
 
+        if os.environ.get(DOCKER_COMPOSE_HTTP_TIMEOUT_ENV) is None:
+            os.environ[DOCKER_COMPOSE_HTTP_TIMEOUT_ENV] = DOCKER_COMPOSE_HTTP_TIMEOUT
+
         if command == 'train':
             optml_dirs = {'output', 'output/data', 'input'}
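
Two behaviors worth noting here. The env-var guard is equivalent to ``os.environ.setdefault``, so a user-supplied COMPOSE_HTTP_TIMEOUT wins over the new 120-second default. And the intermediate-output mount only engages for local ``file://`` output paths; a small illustration (hypothetical path, not from the commit):

    import os
    from urllib.parse import urlparse  # Python 3 stdlib; the SDK routes this through six

    os.environ.setdefault('COMPOSE_HTTP_TIMEOUT', '120')  # same effect as the diff's guard

    output_data_config = {'S3OutputPath': 'file:///tmp/my-job'}
    parsed_uri = urlparse(output_data_config['S3OutputPath'])
    print(parsed_uri.scheme)  # 'file'
    print(os.path.join(parsed_uri.path, 'output', 'intermediate'))
    # '/tmp/my-job/output/intermediate' -> mounted at /opt/ml/output/intermediate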

src/sagemaker/mxnet/README.rst

Lines changed: 3 additions & 3 deletions
@@ -209,15 +209,15 @@ If you were previously relying on the default save method, you can now import on
 
     save(args.model_dir, model)
 
-Lastly, if you were relying on the container launching a parameter server for use with distributed training, you must now set ``distribution`` to the following dictionary when creating an MXNet estimator:
+Lastly, if you were relying on the container launching a parameter server for use with distributed training, you must now set ``distributions`` to the following dictionary when creating an MXNet estimator:
 
 .. code:: python
 
     from sagemaker.mxnet import MXNet
 
     estimator = MXNet('path-to-distributed-training-script.py',
                       ...,
-                      distribution={'parameter_server': {'enabled': True}})
+                      distributions={'parameter_server': {'enabled': True}})
 
 
 Using third-party libraries
@@ -323,7 +323,7 @@ The following are optional arguments. When you create an ``MXNet`` object, you c
   framework_version and py_version. Refer to: `SageMaker MXNet Docker Containers
   <#sagemaker-mxnet-docker-containers>`_ for details on what the Official images support
   and where to find the source code to build your custom image.
-- ``distribution`` For versions 1.3 and above only.
+- ``distributions`` For versions 1.3 and above only.
   Specifies information for how to run distributed training.
   To launch a parameter server during training, set this argument to:

src/sagemaker/mxnet/estimator.py

Lines changed: 1 addition & 1 deletion
@@ -67,7 +67,7 @@ def __init__(self, entry_point, source_dir=None, hyperparameters=None, py_versio
             Examples:
                 123.dkr.ecr.us-west-2.amazonaws.com/my-custom-image:1.0
                 custom-image:latest.
-            distribution (dict): A dictionary with information on how to run distributed training
+            distributions (dict): A dictionary with information on how to run distributed training
                 (default: None).
             **kwargs: Additional kwargs passed to the :class:`~sagemaker.estimator.Framework` constructor.
         """

src/sagemaker/tensorflow/__init__.py

Lines changed: 0 additions & 8 deletions
@@ -19,13 +19,5 @@
 # classes for tensorflow serving. Currently tensorflow_serving_api can only be pip-installed for python 2.
 sys.path.append(os.path.dirname(__file__))
 
-from distutils.version import LooseVersion  # noqa: E402, F401 pylint: disable=no-name-in-module
-import tensorflow  # noqa: E402, F401
-
-if LooseVersion(tensorflow.__version__) < LooseVersion("1.3.0"):  # pylint: disable=no-member
-    message = 'Tensorflow version must be >= 1.3.0. Current version: {}'.format(
-        tensorflow.__version__)  # pylint: disable=no-member
-    raise AssertionError(message)
-
 from sagemaker.tensorflow.estimator import TensorFlow  # noqa: E402, F401
 from sagemaker.tensorflow.model import TensorFlowModel, TensorFlowPredictor  # noqa: E402, F401

src/sagemaker/tensorflow/estimator.py

Lines changed: 1 addition & 1 deletion
@@ -199,7 +199,7 @@ def __init__(self, training_steps=None, evaluation_steps=None, checkpoint_path=N
                 custom-image:latest.
             script_mode (bool): If set to True will the estimator will use the Script Mode containers (default: False).
                 This will be ignored if py_version is set to 'py3'.
-            distribution (dict): A dictionary with information on how to run distributed training
+            distributions (dict): A dictionary with information on how to run distributed training
                 (default: None). Currently we only support distributed training with parameter servers. To enable it
                 use the following setup:
                 {

src/sagemaker/tensorflow/serving.py

Lines changed: 0 additions & 2 deletions
@@ -13,7 +13,6 @@
 from __future__ import absolute_import
 
 import logging
-
 import sagemaker
 from sagemaker.content_types import CONTENT_TYPE_JSON
 from sagemaker.fw_utils import create_image_uri
@@ -144,7 +143,6 @@ def _get_image_uri(self, instance_type):
         if self.image:
             return self.image
 
-        # reuse standard image uri function, then strip unwanted python component
         region_name = self.sagemaker_session.boto_region_name
         return create_image_uri(region_name, Model.FRAMEWORK_NAME, instance_type,
                                 self._framework_version)

src/sagemaker/utils.py

Lines changed: 9 additions & 0 deletions
@@ -14,6 +14,7 @@
 
 import errno
 import os
+import random
 import re
 import sys
 import tarfile
@@ -64,6 +65,14 @@ def name_from_base(base, max_length=63, short=False):
     return '{}-{}'.format(trimmed_base, timestamp)
 
 
+def unique_name_from_base(base, max_length=63):
+    unique = '%04x' % random.randrange(16**4)  # 4-digit hex
+    ts = str(int(time.time()))
+    available_length = max_length - 2 - len(ts) - len(unique)
+    trimmed = base[:available_length]
+    return '{}-{}-{}'.format(trimmed, ts, unique)
+
+
 def airflow_name_from_base(base, max_length=63, short=False):
     """Append airflow execution_date macro (https://airflow.apache.org/code.html?#macros)
     to the provided string. The macro will be evaluated in Airflow operator runtime.
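
The new helper suffixes the base name with a second-resolution timestamp plus four random hex digits, and trims the base so the result never exceeds ``max_length``. A quick demonstration of the output shape (sample values illustrative; the timestamp and hex suffix vary per call):

    import random
    import time

    def unique_name_from_base(base, max_length=63):
        unique = '%04x' % random.randrange(16**4)  # 4-digit hex
        ts = str(int(time.time()))
        available_length = max_length - 2 - len(ts) - len(unique)  # 2 hyphens
        trimmed = base[:available_length]
        return '{}-{}-{}'.format(trimmed, ts, unique)

    print(unique_name_from_base('my-training-job'))
    # e.g. 'my-training-job-1546300800-f3a9'
    print(len(unique_name_from_base('x' * 100)))  # 63 -- base is trimmed to fit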

tests/conftest.py

Lines changed: 10 additions & 0 deletions
@@ -95,6 +95,11 @@ def mxnet_version(request):
     return request.param
 
 
+@pytest.fixture(scope='module', params=['1.3', '1.3.0'])
+def ei_mxnet_version(request):
+    return request.param
+
+
 @pytest.fixture(scope='module', params=['0.4', '0.4.0'])
 def pytorch_version(request):
     return request.param
@@ -112,6 +117,11 @@ def tf_version(request):
     return request.param
 
 
+@pytest.fixture(scope='module', params=['1.11', '1.11.0'])
+def ei_tf_version(request):
+    return request.param
+
+
 @pytest.fixture(scope='module', params=['0.10.1', '0.10.1', '0.11', '0.11.0'])
 def rl_coach_tf_version(request):
     return request.param
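
Module-scoped, parametrized fixtures like these fan each consuming test out over every listed version string. A hypothetical test (not part of this commit) showing the pickup:

    def test_ei_mxnet_version_format(ei_mxnet_version):
        # pytest runs this twice: once with '1.3', once with '1.3.0'
        assert ei_mxnet_version in ('1.3', '1.3.0')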
