@@ -130,7 +130,7 @@ def _tune(
     hyperparameter_ranges=None,
     job_name=None,
     warm_start_config=None,
-    wait_till_terminal=True,
+    wait=True,
    max_jobs=2,
    max_parallel_jobs=2,
    early_stopping_type="Off",
@@ -155,7 +155,7 @@ def _tune(
         tuner.fit([records, test_record_set], job_name=job_name)
         print("Started hyperparameter tuning job with name:" + tuner.latest_tuning_job.name)

-        if wait_till_terminal:
+        if wait:
             tuner.wait()

     return tuner
@@ -388,7 +388,7 @@ def test_tuning_kmeans_identical_dataset_algorithm_tuner_from_non_terminal_paren
         kmeans_train_set,
         job_name=parent_tuning_job_name,
         hyperparameter_ranges=hyperparameter_ranges,
-        wait_till_terminal=False,
+        wait=False,
         max_parallel_jobs=1,
         max_jobs=1,
     )
@@ -453,15 +453,9 @@ def test_tuning_lda(sagemaker_session, cpu_instance_type):
         )

         tuning_job_name = unique_name_from_base("test-lda", max_length=32)
+        print("Started hyperparameter tuning job with name:" + tuning_job_name)
         tuner.fit([record_set, test_record_set], mini_batch_size=1, job_name=tuning_job_name)

-        latest_tuning_job_name = tuner.latest_tuning_job.name
-
-        print("Started hyperparameter tuning job with name:" + latest_tuning_job_name)
-
-        time.sleep(15)
-        tuner.wait()
-
         attached_tuner = HyperparameterTuner.attach(
             tuning_job_name, sagemaker_session=sagemaker_session
         )
@@ -575,12 +569,8 @@ def test_tuning_mxnet(
         )

         tuning_job_name = unique_name_from_base("tune-mxnet", max_length=32)
-        tuner.fit({"train": train_input, "test": test_input}, job_name=tuning_job_name)
-
         print("Started hyperparameter tuning job with name:" + tuning_job_name)
-
-        time.sleep(15)
-        tuner.wait()
+        tuner.fit({"train": train_input, "test": test_input}, job_name=tuning_job_name)

     best_training_job = tuner.best_training_job()
     with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):
@@ -628,12 +618,8 @@ def test_tuning_tf(
         )

         tuning_job_name = unique_name_from_base("tune-tf", max_length=32)
-        tuner.fit(inputs, job_name=tuning_job_name)
-
         print("Started hyperparameter tuning job with name: " + tuning_job_name)
-
-        time.sleep(15)
-        tuner.wait()
+        tuner.fit(inputs, job_name=tuning_job_name)


 def test_tuning_tf_vpc_multi(
@@ -686,12 +672,8 @@ def test_tuning_tf_vpc_multi(
         )

         tuning_job_name = unique_name_from_base("tune-tf", max_length=32)
-        tuner.fit(inputs, job_name=tuning_job_name)
-
         print(f"Started hyperparameter tuning job with name: {tuning_job_name}")
-
-        time.sleep(15)
-        tuner.wait()
+        tuner.fit(inputs, job_name=tuning_job_name)


 @pytest.mark.canary_quick
@@ -740,13 +722,9 @@ def test_tuning_chainer(
         )

         tuning_job_name = unique_name_from_base("chainer", max_length=32)
+        print("Started hyperparameter tuning job with name: {}".format(tuning_job_name))
         tuner.fit({"train": train_input, "test": test_input}, job_name=tuning_job_name)

-        print("Started hyperparameter tuning job with name:" + tuning_job_name)
-
-        time.sleep(15)
-        tuner.wait()
-
     best_training_job = tuner.best_training_job()
     with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):
         predictor = tuner.deploy(1, cpu_instance_type)
@@ -812,13 +790,9 @@ def test_attach_tuning_pytorch(
         )

         tuning_job_name = unique_name_from_base("pytorch", max_length=32)
+        print("Started hyperparameter tuning job with name: {}".format(tuning_job_name))
         tuner.fit({"training": training_data}, job_name=tuning_job_name)

-        print("Started hyperparameter tuning job with name:" + tuning_job_name)
-
-        time.sleep(15)
-        tuner.wait()
-
     endpoint_name = tuning_job_name
     model_name = "model-name-1"
     attached_tuner = HyperparameterTuner.attach(
@@ -887,17 +861,14 @@ def test_tuning_byo_estimator(sagemaker_session, cpu_instance_type):
             max_parallel_jobs=2,
         )

+        tuning_job_name = unique_name_from_base("byo", 32)
+        print("Started hyperparameter tuning job with name {}:".format(tuning_job_name))
         tuner.fit(
             {"train": s3_train_data, "test": s3_train_data},
             include_cls_metadata=False,
-            job_name=unique_name_from_base("byo", 32),
+            job_name=tuning_job_name,
         )

-        print("Started hyperparameter tuning job with name:" + tuner.latest_tuning_job.name)
-
-        time.sleep(15)
-        tuner.wait()
-
     best_training_job = tuner.best_training_job()
     with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):
         predictor = tuner.deploy(1, cpu_instance_type, endpoint_name=best_training_job)