
Commit cda1275

update READMEs

1 parent c918bb3

5 files changed, +67 -49 lines

README.rst

Lines changed: 13 additions & 9 deletions
@@ -153,9 +153,10 @@ Here is an end to end example of how to use a SageMaker Estimator:
 
     # Configure an MXNet Estimator (no training happens yet)
     mxnet_estimator = MXNet('train.py',
-                            role="SageMakerRole",
+                            role='SageMakerRole',
                             train_instance_type='ml.p2.xlarge',
-                            train_instance_count = 1)
+                            train_instance_count=1,
+                            framework_version='1.2.1')
 
     # Starts a SageMaker training job and waits until completion.
     mxnet_estimator.fit('s3://my_bucket/my_training_data/')
@@ -183,9 +184,10 @@ We can take the example in `Using Estimators <#using-estimators>`__ , and use e
 
     # Configure an MXNet Estimator (no training happens yet)
     mxnet_estimator = MXNet('train.py',
-                            role="SageMakerRole",
+                            role='SageMakerRole',
                             train_instance_type='local',
-                            train_instance_count=1)
+                            train_instance_count=1,
+                            framework_version='1.2.1')
 
     # In Local Mode, fit will pull the MXNet container Docker image and run it locally
     mxnet_estimator.fit('s3://my_bucket/my_training_data/')
@@ -239,7 +241,8 @@ Here is an end-to-end example:
 
     mxnet_estimator = MXNet('train.py',
                             train_instance_type='local',
-                            train_instance_count=1)
+                            train_instance_count=1,
+                            framework_version='1.2.1')
 
     mxnet_estimator.fit('file:///tmp/my_training_data')
     transformer = mxnet_estimator.transformer(1, 'local', assemble_with='Line', max_payload=1)
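A side note on the hunk above: once the local transformer exists, it can run a batch transform against local files. A minimal sketch, not part of the diff itself; the input path and CSV content type are hypothetical stand-ins:

.. code:: python

    # Run a batch transform locally against the trained model
    # ('file:///tmp/my_transform_data' and content_type are assumptions)
    transformer.transform('file:///tmp/my_transform_data', content_type='text/csv')
    transformer.wait()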
@@ -504,10 +507,11 @@ To train a model using your own VPC, set the optional parameters ``subnets`` and
 
     # Configure an MXNet Estimator with subnets and security groups from your VPC
     mxnet_vpc_estimator = MXNet('train.py',
-                                train_instance_type='ml.p2.xlarge',
-                                train_instance_count = 1,
-                                subnets=['subnet-1', 'subnet-2'],
-                                security_group_ids=['sg-1'])
+                                train_instance_type='ml.p2.xlarge',
+                                train_instance_count=1,
+                                framework_version='1.2.1',
+                                subnets=['subnet-1', 'subnet-2'],
+                                security_group_ids=['sg-1'])
 
     # SageMaker Training Job will set VpcConfig and container instances will run in your VPC
     mxnet_vpc_estimator.fit('s3://my_bucket/my_training_data/')
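Taken together, the README.rst hunks all make the same point: pin the MXNet container version explicitly instead of relying on the SDK default. A minimal end-to-end sketch assembled from the + lines above (bucket and role names are the placeholders from the diff):

.. code:: python

    from sagemaker.mxnet import MXNet

    # Explicitly pin the framework version rather than relying on a default
    mxnet_estimator = MXNet('train.py',
                            role='SageMakerRole',
                            train_instance_type='ml.p2.xlarge',
                            train_instance_count=1,
                            framework_version='1.2.1')

    # Starts a SageMaker training job and waits until completion
    mxnet_estimator.fit('s3://my_bucket/my_training_data/')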

src/sagemaker/chainer/README.rst

Lines changed: 18 additions & 15 deletions
@@ -28,11 +28,12 @@ Suppose that you already have an Chainer training script called
 .. code:: python
 
     from sagemaker.chainer import Chainer
-    chainer_estimator = Chainer(entry_point="chainer-train.py",
-                                role="SageMakerRole",
-                                train_instance_type="ml.p3.2xlarge",
-                                train_instance_count=1)
-    chainer_estimator.fit("s3://bucket/path/to/training/data")
+    chainer_estimator = Chainer(entry_point='chainer-train.py',
+                                role='SageMakerRole',
+                                train_instance_type='ml.p3.2xlarge',
+                                train_instance_count=1,
+                                framework_version='4.1.0')
+    chainer_estimator.fit('s3://bucket/path/to/training/data')
 
 Where the S3 URL is a path to your training data, within Amazon S3. The constructor keyword arguments define how
 SageMaker runs your training script and are discussed in detail in a later section.
@@ -107,12 +108,13 @@ directories ('train' and 'test').
 
 .. code:: python
 
-    chainer_estimator = Chainer("chainer-train.py",
-                                train_instance_type="ml.p3.2xlarge",
-                                train_instance_count=1,
-                                hyperparameters = {'epochs': 20, 'batch-size': 64, 'learning-rate':0.1})
+    chainer_estimator = Chainer('chainer-train.py',
+                                train_instance_type='ml.p3.2xlarge',
+                                train_instance_count=1,
+                                framework_version='4.1.0',
+                                hyperparameters = {'epochs': 20, 'batch-size': 64, 'learning-rate': 0.1})
     chainer_estimator.fit({'train': 's3://my-data-bucket/path/to/my/training/data',
-                           'test': 's3://my-data-bucket/path/to/my/test/data'})
+                          'test': 's3://my-data-bucket/path/to/my/test/data'})
 
 
 Chainer Estimators
@@ -280,13 +282,14 @@ operation.
 .. code:: python
 
     # Train my estimator
-    chainer_estimator = Chainer(entry_point="train_and_deploy.py",
-                                train_instance_type="ml.p3.2xlarge",
-                                train_instance_count=1)
-    chainer_estimator.fit("s3://my_bucket/my_training_data/")
+    chainer_estimator = Chainer(entry_point='train_and_deploy.py',
+                                train_instance_type='ml.p3.2xlarge',
+                                train_instance_count=1,
+                                framework_version='4.1.0')
+    chainer_estimator.fit('s3://my_bucket/my_training_data/')
 
     # Deploy my estimator to a SageMaker Endpoint and get a Predictor
-    predictor = chainer_estimator.deploy(instance_type="ml.m4.xlarge",
+    predictor = chainer_estimator.deploy(instance_type='ml.m4.xlarge',
                                          initial_instance_count=1)
 
     # `data` is a NumPy array or a Python list.
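For reference, the full train-deploy-predict flow that the last hunk edits, assembled from the + lines; the final predict call is a sketch suggested by the ``# `data```` comment above, with ``data`` standing in for your own input:

.. code:: python

    from sagemaker.chainer import Chainer

    # Train with an explicitly pinned Chainer version
    chainer_estimator = Chainer(entry_point='train_and_deploy.py',
                                train_instance_type='ml.p3.2xlarge',
                                train_instance_count=1,
                                framework_version='4.1.0')
    chainer_estimator.fit('s3://my_bucket/my_training_data/')

    # Deploy to a SageMaker Endpoint and get a Predictor
    predictor = chainer_estimator.deploy(instance_type='ml.m4.xlarge',
                                         initial_instance_count=1)

    # Sketch (not in the diff): `data` is a NumPy array or a Python list
    response = predictor.predict(data)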

src/sagemaker/mxnet/README.rst

Lines changed: 16 additions & 13 deletions
@@ -17,11 +17,12 @@ Suppose that you already have an MXNet training script called
 .. code:: python
 
     from sagemaker.mxnet import MXNet
-    mxnet_estimator = MXNet("mxnet-train.py",
-                            role="SageMakerRole",
-                            train_instance_type="ml.p3.2xlarge",
-                            train_instance_count=1)
-    mxnet_estimator.fit("s3://bucket/path/to/training/data")
+    mxnet_estimator = MXNet('mxnet-train.py',
+                            role='SageMakerRole',
+                            train_instance_type='ml.p3.2xlarge',
+                            train_instance_count=1,
+                            framework_version='1.2.1')
+    mxnet_estimator.fit('s3://bucket/path/to/training/data')
 
 Where the s3 url is a path to your training data, within Amazon S3. The constructor keyword arguments define how SageMaker runs your training script and are discussed, in detail, in a later section.

@@ -97,10 +98,11 @@ You run MXNet training scripts on SageMaker by creating ``MXNet`` Estimators. Sa
 
 .. code:: python
 
-    mxnet_estimator = MXNet("train.py",
-                            train_instance_type="ml.p2.xlarge",
-                            train_instance_count=1)
-    mxnet_estimator.fit("s3://my_bucket/my_training_data/")
+    mxnet_estimator = MXNet('train.py',
+                            train_instance_type='ml.p2.xlarge',
+                            train_instance_count=1,
+                            framework_version='1.2.1')
+    mxnet_estimator.fit('s3://my_bucket/my_training_data/')
 
 MXNet Estimators
 ^^^^^^^^^^^^^^^^
@@ -302,10 +304,11 @@ After calling ``fit``, you can call ``deploy`` on an ``MXNet`` Estimator to crea
 .. code:: python
 
     # Train my estimator
-    mxnet_estimator = MXNet("train.py",
-                            train_instance_type="ml.p2.xlarge",
-                            train_instance_count=1)
-    mxnet_estimator.fit("s3://my_bucket/my_training_data/")
+    mxnet_estimator = MXNet('train.py',
+                            train_instance_type='ml.p2.xlarge',
+                            train_instance_count=1,
+                            framework_version='1.2.1')
+    mxnet_estimator.fit('s3://my_bucket/my_training_data/')
 
     # Deploy my estimator to a SageMaker Endpoint and get a Predictor
     predictor = mxnet_estimator.deploy(instance_type='ml.m4.xlarge',
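The same pattern for MXNet, assembled from the + lines of the last hunk. The ``deploy`` call is truncated in the diff, so the ``initial_instance_count`` argument is assumed by analogy with the Chainer example, and ``data`` is a stand-in input:

.. code:: python

    # Train with a pinned MXNet version
    mxnet_estimator = MXNet('train.py',
                            train_instance_type='ml.p2.xlarge',
                            train_instance_count=1,
                            framework_version='1.2.1')
    mxnet_estimator.fit('s3://my_bucket/my_training_data/')

    # Deploy to a SageMaker Endpoint and get a Predictor
    # (initial_instance_count assumed; the diff truncates this call)
    predictor = mxnet_estimator.deploy(instance_type='ml.m4.xlarge',
                                       initial_instance_count=1)

    # Sketch: `data` stands in for a NumPy array or Python list
    response = predictor.predict(data)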

src/sagemaker/pytorch/README.rst

Lines changed: 7 additions & 4 deletions
@@ -48,7 +48,8 @@ You can then setup a ``PyTorch`` Estimator with keyword arguments to point to th
     pytorch_estimator = PyTorch(entry_point='pytorch-train.py',
                                 role='SageMakerRole',
                                 train_instance_type='ml.p3.2xlarge',
-                                train_instance_count=1)
+                                train_instance_count=1,
+                                framework_version='0.4.0')
 
 After that, you simply tell the estimator to start a training job and provide an S3 URL
 that is the path to your training data within Amazon S3:
@@ -136,9 +137,10 @@ directories ('train' and 'test').
     pytorch_estimator = PyTorch('pytorch-train.py',
                                 train_instance_type='ml.p3.2xlarge',
                                 train_instance_count=1,
-                                hyperparameters = {'epochs': 20, 'batch-size': 64, 'learning-rate':0.1})
+                                framework_version='0.4.0',
+                                hyperparameters = {'epochs': 20, 'batch-size': 64, 'learning-rate': 0.1})
     pytorch_estimator.fit({'train': 's3://my-data-bucket/path/to/my/training/data',
-                           'test': 's3://my-data-bucket/path/to/my/test/data'})
+                          'test': 's3://my-data-bucket/path/to/my/test/data'})
 
 
 PyTorch Estimators
@@ -318,7 +320,8 @@ operation.
     # Train my estimator
     pytorch_estimator = PyTorch(entry_point='train_and_deploy.py',
                                 train_instance_type='ml.p3.2xlarge',
-                                train_instance_count=1)
+                                train_instance_count=1,
+                                framework_version='0.4.0')
     pytorch_estimator.fit('s3://my_bucket/my_training_data/')
 
     # Deploy my estimator to a SageMaker Endpoint and get a Predictor
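Pulling the PyTorch hunks together: a runnable sketch of the multi-channel training example with the pinned version, taken from the + lines (only the import line is assumed from the package path):

.. code:: python

    from sagemaker.pytorch import PyTorch

    pytorch_estimator = PyTorch('pytorch-train.py',
                                train_instance_type='ml.p3.2xlarge',
                                train_instance_count=1,
                                framework_version='0.4.0',
                                hyperparameters={'epochs': 20, 'batch-size': 64, 'learning-rate': 0.1})

    # Two input channels, exposed to the script as 'train' and 'test' directories
    pytorch_estimator.fit({'train': 's3://my-data-bucket/path/to/my/training/data',
                           'test': 's3://my-data-bucket/path/to/my/test/data'})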

src/sagemaker/tensorflow/README.rst

Lines changed: 13 additions & 8 deletions
@@ -26,7 +26,8 @@ follows:
 
     tf_estimator = TensorFlow(entry_point='tf-train.py', role='SageMakerRole',
                               training_steps=10000, evaluation_steps=100,
-                              train_instance_count=1, train_instance_type='ml.p2.xlarge')
+                              train_instance_count=1, train_instance_type='ml.p2.xlarge',
+                              framework_version='1.10.0')
     tf_estimator.fit('s3://bucket/path/to/training/data')
 
 Where the S3 url is a path to your training data, within Amazon S3. The
@@ -365,7 +366,8 @@ The following code sample shows how to train a custom TensorFlow script 'tf-trai
 
     tf_estimator = TensorFlow(entry_point='tf-train.py', role='SageMakerRole',
                               training_steps=10000, evaluation_steps=100,
-                              train_instance_count=1, train_instance_type='ml.p2.xlarge')
+                              train_instance_count=1, train_instance_type='ml.p2.xlarge',
+                              framework_version='1.10.0')
     tf_estimator.fit('s3://bucket/path/to/training/data')
 
 sagemaker.tensorflow.TensorFlow class
@@ -582,7 +584,8 @@ estimator pointing to the previous checkpoint path:
     tf_estimator = TensorFlow('tf-train.py', role='SageMakerRole',
                               checkpoint_path=previous_checkpoint_path,
                               training_steps=10000, evaluation_steps=100,
-                              train_instance_count=1, train_instance_type='ml.p2.xlarge')
+                              train_instance_count=1, train_instance_type='ml.p2.xlarge',
+                              framework_version='1.10.0')
     tf_estimator.fit('s3://bucket/path/to/training/data')
@@ -622,7 +625,8 @@ like this:
 
     from sagemaker.tensorflow import TensorFlow
 
-    estimator = TensorFlow(entry_point='tf-train.py', ..., train_instance_count=1, train_instance_type='ml.c4.xlarge')
+    estimator = TensorFlow(entry_point='tf-train.py', ..., train_instance_count=1,
+                           train_instance_type='ml.c4.xlarge', framework_version='1.10.0')
 
     estimator.fit(inputs)
@@ -687,7 +691,8 @@ The following code adds a prediction request to the previous code example:
 
 .. code:: python
 
-    estimator = TensorFlow(entry_point='tf-train.py', ..., train_instance_count=1, train_instance_type='ml.c4.xlarge')
+    estimator = TensorFlow(entry_point='tf-train.py', ..., train_instance_count=1,
+                           train_instance_type='ml.c4.xlarge', framework_version='1.10.0')
 
     estimator.fit(inputs)
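The hunk above only shows the estimator and fit lines of the prediction example; the rest of that flow is not in the diff, so this is a hedged sketch with ``input_data`` standing in for your serialized model input:

.. code:: python

    # Deploy the trained estimator and send a prediction request
    # (sketch; `input_data` is a stand-in, not from the diff)
    predictor = estimator.deploy(initial_instance_count=1,
                                 instance_type='ml.c4.xlarge')
    result = predictor.predict(input_data)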
@@ -822,7 +827,7 @@ In your ``entry_point`` script, you can use ``PipeModeDataset`` like a ``Dataset
                 'data': tf.decode_raw(parsed['data'], tf.float64)
             }, parsed['labels'])
 
-    def train_input_fn(training_dir, hyperparameters):
+    def train_input_fn(training_dir, hyperparameters):
         ds = PipeModeDataset(channel='training', record_format='TFRecord')
         ds = ds.repeat(20)
         ds = ds.prefetch(10)
@@ -841,7 +846,7 @@ To run training job with Pipe input mode, pass in ``input_mode='Pipe'`` to your
 
     tf_estimator = TensorFlow(entry_point='tf-train-with-pipemodedataset.py', role='SageMakerRole',
                               training_steps=10000, evaluation_steps=100,
                               train_instance_count=1, train_instance_type='ml.p2.xlarge',
-                              input_mode='Pipe')
+                              framework_version='1.10.0', input_mode='Pipe')
 
     tf_estimator.fit('s3://bucket/path/to/training/data')
@@ -854,7 +859,7 @@ If your TFRecords are compressed, you can train on Gzipped TF Records by passing
     from sagemaker.session import s3_input
 
     train_s3_input = s3_input('s3://bucket/path/to/training/data', compression='Gzip')
-    tf_estimator.fit(train_s3_input)
+    tf_estimator.fit(train_s3_input)
 
 
 You can learn more about ``PipeModeDataset`` in the sagemaker-tensorflow-extensions repository: https://github.com/aws/sagemaker-tensorflow-extensions
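Combining the last two hunks: Pipe-mode training with a pinned TensorFlow version, reading Gzipped TFRecords through ``s3_input``. Everything here comes from the + and context lines above:

.. code:: python

    from sagemaker.tensorflow import TensorFlow
    from sagemaker.session import s3_input

    # Pipe input mode streams data to the container instead of downloading it
    tf_estimator = TensorFlow(entry_point='tf-train-with-pipemodedataset.py', role='SageMakerRole',
                              training_steps=10000, evaluation_steps=100,
                              train_instance_count=1, train_instance_type='ml.p2.xlarge',
                              framework_version='1.10.0', input_mode='Pipe')

    # Gzipped TFRecords: wrap the S3 prefix in an s3_input with compression
    train_s3_input = s3_input('s3://bucket/path/to/training/data', compression='Gzip')
    tf_estimator.fit(train_s3_input)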
