@@ -2,14 +2,7 @@
 import argparse
 import os
 import numpy as np
-<<<<<<< HEAD
-<<<<<<< HEAD
 import json
-=======
-import sys
->>>>>>> Scriptmode single machine training implementation (#78)
-=======
->>>>>>> Add Keras support (#126)


 def _parse_args():
@@ -19,7 +12,6 @@ def _parse_args():
     # hyperparameters sent by the client are passed as command-line arguments to the script.
     parser.add_argument('--epochs', type=int, default=1)
     # Data, model, and output directories
-<<<<<<< HEAD
     parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
     parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
     parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
@@ -28,32 +20,12 @@ def _parse_args():
     return parser.parse_known_args()


-=======
-    parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
-    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
-    parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
-
-    return parser.parse_known_args()
-<<<<<<< HEAD
-#
->>>>>>> Scriptmode single machine training implementation (#78)
-=======
-
-
->>>>>>> Add distributed training support (#98)
 def _load_training_data(base_dir):
     x_train = np.load(os.path.join(base_dir, 'train', 'x_train.npy'))
     y_train = np.load(os.path.join(base_dir, 'train', 'y_train.npy'))
     return x_train, y_train

-<<<<<<< HEAD
-<<<<<<< HEAD
-
-=======
->>>>>>> Scriptmode single machine training implementation (#78)
-=======

->>>>>>> Add distributed training support (#98)
 def _load_testing_data(base_dir):
     x_test = np.load(os.path.join(base_dir, 'test', 'x_test.npy'))
     y_test = np.load(os.path.join(base_dir, 'test', 'y_test.npy'))
@@ -63,15 +35,7 @@ def _load_testing_data(base_dir):
 args, unknown = _parse_args()

 model = tf.keras.models.Sequential([
-<<<<<<< HEAD
-<<<<<<< HEAD
-    tf.keras.layers.Flatten(input_shape=(28, 28)),
-=======
-    tf.keras.layers.Flatten(),
->>>>>>> Scriptmode single machine training implementation (#78)
-=======
     tf.keras.layers.Flatten(input_shape=(28, 28)),
->>>>>>> Add distributed training support (#98)
     tf.keras.layers.Dense(512, activation=tf.nn.relu),
     tf.keras.layers.Dropout(0.2),
     tf.keras.layers.Dense(10, activation=tf.nn.softmax)
@@ -84,9 +48,5 @@ def _load_testing_data(base_dir):
 x_test, y_test = _load_testing_data(args.train)
 model.fit(x_train, y_train, epochs=args.epochs)
 model.evaluate(x_test, y_test)
-<<<<<<< HEAD
 if args.current_host == args.hosts[0]:
-    model.save(os.path.join('/opt/ml/model', 'my_model.h5'))
-=======
-    model.save(os.path.join(args.model_dir, 'my_model.h5'))
->>>>>>> Scriptmode single machine training implementation (#78)
+    model.save(os.path.join('/opt/ml/model', 'my_model.h5'))
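For context, a hedged sketch of how a script like this is usually launched in SageMaker script mode. The TensorFlow estimator class and the 'training' channel name come from the SageMaker Python SDK; the entry-point filename, role ARN, bucket path, instance settings, and framework version below are placeholders/assumptions, not taken from this diff.

# Hypothetical launcher for the script in this diff (assumed to be saved as mnist.py).
# Parameter names follow SageMaker Python SDK v2; older v1 releases use
# train_instance_count / train_instance_type and script_mode=True instead.
from sagemaker.tensorflow import TensorFlow

estimator = TensorFlow(
    entry_point='mnist.py',            # assumed filename of the diffed script
    role='<your-sagemaker-role-arn>',  # placeholder
    instance_count=2,                  # two hosts exercise the args.hosts[0] leader check
    instance_type='ml.c5.xlarge',      # placeholder
    framework_version='1.15.2',        # assumption: any script-mode-capable TF version
    py_version='py3',
    hyperparameters={'epochs': 1},     # forwarded to the script as --epochs
)

# The 'training' channel is exposed inside the container as SM_CHANNEL_TRAINING,
# which the script picks up through the --train default.
estimator.fit({'training': 's3://<bucket>/<prefix>/mnist'})

Inside the training container, SM_MODEL_DIR conventionally points at /opt/ml/model, whose contents SageMaker packages as the model artifact after the job ends; saving only when args.current_host == args.hosts[0] keeps non-leader hosts in a distributed job from writing the artifact concurrently.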
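A minimal local smoke test is also possible, under these assumptions: the script is saved as mnist.py, the .npy files exist under ./data/train and ./data/test, /opt/ml/model is writable, and the elided part of _parse_args() reads SM_CURRENT_HOST to back args.current_host (it may read further SM_* variables not visible in this diff).

# Hypothetical local dry run: fake the SageMaker-provided environment variables,
# then invoke the training script the way SageMaker would.
import json
import os
import subprocess

env = dict(
    os.environ,
    SM_MODEL_DIR='/opt/ml/model',      # the script also hard-codes this path for model.save
    SM_CHANNEL_TRAINING='./data',      # expects ./data/train/*.npy and ./data/test/*.npy
    SM_HOSTS=json.dumps(['algo-1']),   # single host, so it is also hosts[0]
    SM_CURRENT_HOST='algo-1',          # assumed to back args.current_host
)

subprocess.run(['python', 'mnist.py', '--epochs', '1'], env=env, check=True)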