16
16
import os
17
17
18
18
import numpy as np
19
- < << << << HEAD
20
- < << << << HEAD
21
- == == == =
22
- >> >> >> > Fix Keras test (#132)
23
19
import pytest
24
20
from sagemaker .tensorflow import serving , TensorFlow
25
21
26
22
from test .integration import RESOURCE_PATH
27
23
from test .integration .utils import processor , py_version # noqa: F401
28
- == == == =
29
- from sagemaker .tensorflow import serving , TensorFlow
30
-
31
- from test .integration import RESOURCE_PATH
32
- > >> >> >> Add Keras support (#126)
33
24
34
25
35
26
logging .basicConfig (level = logging .DEBUG )
36
27
37
28
38
@pytest.mark.skip(reason="Serving part fails because of version mismatch.")
def test_keras_training(sagemaker_local_session, docker_image, tmpdir, framework_version):
    """Train a Keras model in SageMaker local mode, then deploy it and run a prediction.

    Resolved from conflicting branches: keeps the skip marker and the
    ``framework_version`` fixture (the most recent HEAD side), and threads
    that version through both the estimator and the serving model so the
    two stay consistent.
    """
    entry_point = os.path.join(RESOURCE_PATH, 'keras_inception.py')
    # Write the trained model artifacts to the pytest tmpdir so local-mode
    # output is isolated per test run.
    output_path = 'file://{}'.format(tmpdir)

    estimator = TensorFlow(
        entry_point=entry_point,
        role='SageMakerRole',
        train_instance_count=1,
        train_instance_type='local',
        image_name=docker_image,
        sagemaker_session=sagemaker_local_session,
        model_dir='/opt/ml/model',
        output_path=output_path,
        framework_version=framework_version,
        py_version='py3')

    estimator.fit()

    # Serve the artifacts produced by the training job above; use the same
    # framework version as training to avoid a train/serve mismatch.
    model = serving.Model(model_data=output_path,
                          role='SageMakerRole',
                          framework_version=framework_version,
                          sagemaker_session=sagemaker_local_session)

    predictor = model.deploy(initial_instance_count=1, instance_type='local')

    # keras_inception expects 4D image-like input; any truthy prediction
    # response means the serving container answered.
    assert predictor.predict(np.random.randn(4, 4, 4, 2) * 255)

    predictor.delete_endpoint()