Commit f8feda2

prateekch91 and Prateek Chauhan authored
Update Sagemaker Neo Notebooks examples to PyTorch 1.6 (#2018)
Co-authored-by: Prateek Chauhan <[email protected]>
1 parent 24fcd49 commit f8feda2

5 files changed: +49 −49 lines changed

sagemaker_neo_compilation_jobs/pytorch_torchvision/code/resnet18.py

Lines changed: 21 additions & 21 deletions
@@ -6,6 +6,7 @@
 
 import numpy as np
 import torch
+import neopytorch
 import torchvision.transforms as transforms
 from PIL import Image  # Training container doesn't have this package
 
@@ -55,24 +56,23 @@ def transform_fn(model, payload, request_content_type,
 def model_fn(model_dir):
 
     logger.info('model_fn')
-    with torch.neo.config(model_dir=model_dir, neo_runtime=True):
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        # The compiled model is saved as "compiled.pt"
-        model = torch.jit.load(os.path.join(model_dir, 'compiled.pt'))
-        model = model.to(device)
-
-        # It is recommended to run warm-up inference during model load
-        sample_input_path = os.path.join(model_dir, 'sample_input.pkl')
-        with open(sample_input_path, 'rb') as input_file:
-            model_input = pickle.load(input_file)
-        if torch.is_tensor(model_input):
-            model_input = model_input.to(device)
-            model(model_input)
-        elif isinstance(model_input, tuple):
-            model_input = (inp.to(device)
-                           for inp in model_input if torch.is_tensor(inp))
-            model(*model_input)
-        else:
-            print("Only supports a torch tensor or a tuple of torch tensors")
-
-        return model
+    neopytorch.config(model_dir=model_dir, neo_runtime=True)
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    # The compiled model is saved as "compiled.pt"
+    model = torch.jit.load(os.path.join(model_dir, 'compiled.pt'), map_location=device)
+
+    # It is recommended to run warm-up inference during model load
+    sample_input_path = os.path.join(model_dir, 'sample_input.pkl')
+    with open(sample_input_path, 'rb') as input_file:
+        model_input = pickle.load(input_file)
+    if torch.is_tensor(model_input):
+        model_input = model_input.to(device)
+        model(model_input)
+    elif isinstance(model_input, tuple):
+        model_input = (inp.to(device)
+                       for inp in model_input if torch.is_tensor(inp))
+        model(*model_input)
+    else:
+        print("Only supports a torch tensor or a tuple of torch tensors")
+
+    return model
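
Reassembled from the added lines above, the updated model_fn reads as follows. The module-level imports and logger setup shown here are assumptions about the parts of the script outside this hunk, and neopytorch ships only in the SageMaker Neo PyTorch serving container. The vgg19_bn_compiled.py script further down receives the identical change.

# Sketch of resnet18.py's model_fn after this change, assembled from the "+" lines above.
# Imports and logger setup are assumptions about code outside this hunk.
import logging
import os
import pickle

import neopytorch
import torch

logger = logging.getLogger(__name__)


def model_fn(model_dir):
    logger.info('model_fn')
    neopytorch.config(model_dir=model_dir, neo_runtime=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # The compiled model is saved as "compiled.pt"; map_location loads it straight
    # onto the target device instead of loading and then calling .to(device).
    model = torch.jit.load(os.path.join(model_dir, 'compiled.pt'), map_location=device)

    # It is recommended to run warm-up inference during model load
    sample_input_path = os.path.join(model_dir, 'sample_input.pkl')
    with open(sample_input_path, 'rb') as input_file:
        model_input = pickle.load(input_file)
    if torch.is_tensor(model_input):
        model(model_input.to(device))
    elif isinstance(model_input, tuple):
        model_input = (inp.to(device) for inp in model_input if torch.is_tensor(inp))
        model(*model_input)
    else:
        print("Only supports a torch tensor or a tuple of torch tensors")

    return model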

sagemaker_neo_compilation_jobs/pytorch_torchvision/pytorch_torchvision_neo.ipynb

Lines changed: 1 addition & 1 deletion
@@ -79,7 +79,7 @@
     "data_shape = '{\"input0\":[1,3,224,224]}'\n",
     "target_device = 'ml_c5'\n",
     "framework = 'PYTORCH'\n",
-    "framework_version = '1.4.0'\n",
+    "framework_version = '1.6'\n",
     "compiled_model_path = 's3://{}/{}/output'.format(bucket, compilation_job_name)"
    ]
   },
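
This cell only sets configuration variables; the compilation call itself sits outside the hunk. As a hedged illustration of how such variables typically feed a Neo compilation request through boto3 (the job name, role ARN, and model S3 URI below are placeholders, and the notebook's actual call may differ):

# Hypothetical sketch, not part of this diff: wiring the cell's variables into a
# Neo compilation job via boto3. Placeholder values are marked as such.
import boto3

sm_client = boto3.client('sagemaker')

data_shape = '{"input0":[1,3,224,224]}'
target_device = 'ml_c5'
framework = 'PYTORCH'
framework_version = '1.6'
compilation_job_name = 'torchvision-resnet18-neo'           # placeholder
role_arn = 'arn:aws:iam::123456789012:role/SageMakerRole'   # placeholder
model_s3_uri = 's3://my-bucket/model/model.tar.gz'          # placeholder
compiled_model_path = 's3://my-bucket/{}/output'.format(compilation_job_name)

sm_client.create_compilation_job(
    CompilationJobName=compilation_job_name,
    RoleArn=role_arn,
    InputConfig={
        'S3Uri': model_s3_uri,
        'DataInputConfig': data_shape,
        'Framework': framework,
        'FrameworkVersion': framework_version,  # '1.6', the version this commit moves to
    },
    OutputConfig={
        'S3OutputLocation': compiled_model_path,
        'TargetDevice': target_device,
    },
    StoppingCondition={'MaxRuntimeInSeconds': 900},
)

The only value this commit touches is framework_version, which should match the PyTorch version used to trace and save the model artifact.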

sagemaker_neo_compilation_jobs/pytorch_vgg19_bn/code/vgg19_bn_compiled.py

Lines changed: 19 additions & 19 deletions
@@ -6,6 +6,7 @@
 
 import numpy as np
 import torch
+import neopytorch
 import torchvision.transforms as transforms
 from PIL import Image  # Training container doesn't have this package
 
@@ -55,24 +56,23 @@ def transform_fn(model, payload, request_content_type,
 def model_fn(model_dir):
 
     logger.info('model_fn')
-    with torch.neo.config(model_dir=model_dir, neo_runtime=True):
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        # The compiled model is saved as "compiled.pt"
-        model = torch.jit.load(os.path.join(model_dir, 'compiled.pt'))
-        model = model.to(device)
+    neopytorch.config(model_dir=model_dir, neo_runtime=True)
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    # The compiled model is saved as "compiled.pt"
+    model = torch.jit.load(os.path.join(model_dir, 'compiled.pt'), map_location=device)
 
-        # It is recommended to run warm-up inference during model load
-        sample_input_path = os.path.join(model_dir, 'sample_input.pkl')
-        with open(sample_input_path, 'rb') as input_file:
-            model_input = pickle.load(input_file)
-        if torch.is_tensor(model_input):
-            model_input = model_input.to(device)
-            model(model_input)
-        elif isinstance(model_input, tuple):
-            model_input = (inp.to(device)
-                           for inp in model_input if torch.is_tensor(inp))
-            model(*model_input)
-        else:
-            print("Only supports a torch tensor or a tuple of torch tensors")
+    # It is recommended to run warm-up inference during model load
+    sample_input_path = os.path.join(model_dir, 'sample_input.pkl')
+    with open(sample_input_path, 'rb') as input_file:
+        model_input = pickle.load(input_file)
+    if torch.is_tensor(model_input):
+        model_input = model_input.to(device)
+        model(model_input)
+    elif isinstance(model_input, tuple):
+        model_input = (inp.to(device)
+                       for inp in model_input if torch.is_tensor(inp))
+        model(*model_input)
+    else:
+        print("Only supports a torch tensor or a tuple of torch tensors")
 
-        return model
+    return model

sagemaker_neo_compilation_jobs/pytorch_vgg19_bn/code/vgg19_bn_uncompiled.py

Lines changed: 6 additions & 6 deletions
@@ -6,6 +6,7 @@
 
 import numpy as np
 import torch
+import neopytorch
 import torchvision.transforms as transforms
 from PIL import Image  # Training container doesn't have this package
 
@@ -55,10 +56,9 @@ def transform_fn(model, payload, request_content_type,
 def model_fn(model_dir):
 
     logger.info('model_fn')
-    with torch.neo.config(model_dir=model_dir, neo_runtime=True):
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        # The compiled model is saved as "compiled.pt"
-        model = torch.jit.load(os.path.join(model_dir, 'model.pth'))
-        model = model.to(device)
+    neopytorch.config(model_dir=model_dir, neo_runtime=True)
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    # The compiled model is saved as "compiled.pt"
+    model = torch.jit.load(os.path.join(model_dir, 'model.pth'), map_location=device)
 
-        return model
+    return model
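
Across all three scripts the functional core of the change is the same: drop the torch.neo.config context manager in favor of a neopytorch.config call, and let torch.jit.load place the TorchScript module on the target device via map_location instead of loading and then calling .to(device). A minimal, self-contained illustration of the two load patterns (TinyModule and the temporary directory exist only for this example):

# Minimal illustration of the load pattern this commit switches to.
import os
import tempfile

import torch


class TinyModule(torch.nn.Module):
    def forward(self, x):
        return x * 2


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

with tempfile.TemporaryDirectory() as model_dir:
    path = os.path.join(model_dir, "model.pth")
    torch.jit.save(torch.jit.script(TinyModule()), path)

    # Old pattern (removed by this commit): load, then move to the device.
    model = torch.jit.load(path)
    model = model.to(device)

    # New pattern (added by this commit): map the module onto the device at load time.
    model = torch.jit.load(path, map_location=device)
    print(model(torch.ones(2, device=device)))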

sagemaker_neo_compilation_jobs/pytorch_vgg19_bn/pytorch-vgg19-bn.ipynb

Lines changed: 2 additions & 2 deletions
@@ -24,7 +24,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "!~/anaconda3/envs/pytorch_p36/bin/pip install torch==1.4.0 torchvision==0.5.0"
+    "!~/anaconda3/envs/pytorch_p36/bin/pip install torch==1.6.0 torchvision==0.7.0"
    ]
   },
   {
@@ -127,7 +127,7 @@
     "data_shape = '{\"input0\":[1,3,224,224]}'\n",
     "target_device = 'ml_c5'\n",
     "framework = 'pytorch'\n",
-    "framework_version = '1.4.0'\n",
+    "framework_version = '1.6'\n",
     "compiled_model_path = 's3://{}/{}/output'.format(bucket, compilation_job_name)\n",
     "\n",
     "inference_image_uri = image_uris.retrieve(f'neo-{framework}', region, framework_version, instance_type=target_device)"
