@@ -130,7 +130,7 @@ def _tune(
     hyperparameter_ranges=None,
     job_name=None,
     warm_start_config=None,
-    wait_till_terminal=True,
+    wait=True,
     max_jobs=2,
     max_parallel_jobs=2,
     early_stopping_type="Off",
@@ -152,11 +152,8 @@ def _tune(
     records = kmeans_estimator.record_set(kmeans_train_set[0][:100])
     test_record_set = kmeans_estimator.record_set(kmeans_train_set[0][:100], channel="test")
 
-    tuner.fit([records, test_record_set], job_name=job_name)
-    print("Started hyperparameter tuning job with name:" + tuner.latest_tuning_job.name)
-
-    if wait_till_terminal:
-        tuner.wait()
+    print("Started hyperparameter tuning job with name: {}".format(job_name))
+    tuner.fit([records, test_record_set], job_name=job_name, wait=wait)
 
     return tuner
 
@@ -388,7 +385,7 @@ def test_tuning_kmeans_identical_dataset_algorithm_tuner_from_non_terminal_paren
         kmeans_train_set,
         job_name=parent_tuning_job_name,
         hyperparameter_ranges=hyperparameter_ranges,
-        wait_till_terminal=False,
+        wait=False,
         max_parallel_jobs=1,
         max_jobs=1,
     )
@@ -453,15 +450,9 @@ def test_tuning_lda(sagemaker_session, cpu_instance_type):
     )
 
     tuning_job_name = unique_name_from_base("test-lda", max_length=32)
+    print("Started hyperparameter tuning job with name:" + tuning_job_name)
     tuner.fit([record_set, test_record_set], mini_batch_size=1, job_name=tuning_job_name)
 
-    latest_tuning_job_name = tuner.latest_tuning_job.name
-
-    print("Started hyperparameter tuning job with name:" + latest_tuning_job_name)
-
-    time.sleep(15)
-    tuner.wait()
-
     attached_tuner = HyperparameterTuner.attach(
         tuning_job_name, sagemaker_session=sagemaker_session
     )
@@ -516,7 +507,7 @@ def test_stop_tuning_job(sagemaker_session, cpu_instance_type):
     )
 
     tuning_job_name = unique_name_from_base("test-randomcutforest", max_length=32)
-    tuner.fit([records, test_records], tuning_job_name)
+    tuner.fit([records, test_records], tuning_job_name, wait=False)
 
     time.sleep(15)
 
@@ -575,12 +566,8 @@ def test_tuning_mxnet(
     )
 
     tuning_job_name = unique_name_from_base("tune-mxnet", max_length=32)
-    tuner.fit({"train": train_input, "test": test_input}, job_name=tuning_job_name)
-
     print("Started hyperparameter tuning job with name:" + tuning_job_name)
-
-    time.sleep(15)
-    tuner.wait()
+    tuner.fit({"train": train_input, "test": test_input}, job_name=tuning_job_name)
 
     best_training_job = tuner.best_training_job()
     with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):
@@ -628,12 +615,8 @@ def test_tuning_tf(
     )
 
     tuning_job_name = unique_name_from_base("tune-tf", max_length=32)
-    tuner.fit(inputs, job_name=tuning_job_name)
-
     print("Started hyperparameter tuning job with name: " + tuning_job_name)
-
-    time.sleep(15)
-    tuner.wait()
+    tuner.fit(inputs, job_name=tuning_job_name)
 
 
 def test_tuning_tf_vpc_multi(
@@ -686,12 +669,8 @@ def test_tuning_tf_vpc_multi(
     )
 
     tuning_job_name = unique_name_from_base("tune-tf", max_length=32)
-    tuner.fit(inputs, job_name=tuning_job_name)
-
     print(f"Started hyperparameter tuning job with name: {tuning_job_name}")
-
-    time.sleep(15)
-    tuner.wait()
+    tuner.fit(inputs, job_name=tuning_job_name)
 
 
 @pytest.mark.canary_quick
@@ -740,13 +719,9 @@ def test_tuning_chainer(
     )
 
     tuning_job_name = unique_name_from_base("chainer", max_length=32)
+    print("Started hyperparameter tuning job with name: {}".format(tuning_job_name))
     tuner.fit({"train": train_input, "test": test_input}, job_name=tuning_job_name)
 
-    print("Started hyperparameter tuning job with name:" + tuning_job_name)
-
-    time.sleep(15)
-    tuner.wait()
-
     best_training_job = tuner.best_training_job()
     with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):
         predictor = tuner.deploy(1, cpu_instance_type)
@@ -812,13 +787,9 @@ def test_attach_tuning_pytorch(
     )
 
     tuning_job_name = unique_name_from_base("pytorch", max_length=32)
+    print("Started hyperparameter tuning job with name: {}".format(tuning_job_name))
     tuner.fit({"training": training_data}, job_name=tuning_job_name)
 
-    print("Started hyperparameter tuning job with name:" + tuning_job_name)
-
-    time.sleep(15)
-    tuner.wait()
-
     endpoint_name = tuning_job_name
     model_name = "model-name-1"
     attached_tuner = HyperparameterTuner.attach(
@@ -887,17 +858,14 @@ def test_tuning_byo_estimator(sagemaker_session, cpu_instance_type):
         max_parallel_jobs=2,
     )
 
+    tuning_job_name = unique_name_from_base("byo", 32)
+    print("Started hyperparameter tuning job with name: {}".format(tuning_job_name))
     tuner.fit(
         {"train": s3_train_data, "test": s3_train_data},
         include_cls_metadata=False,
-        job_name=unique_name_from_base("byo", 32),
+        job_name=tuning_job_name,
     )
 
-    print("Started hyperparameter tuning job with name:" + tuner.latest_tuning_job.name)
-
-    time.sleep(15)
-    tuner.wait()
-
     best_training_job = tuner.best_training_job()
     with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):
         predictor = tuner.deploy(
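For reference, a minimal sketch of the pattern this change moves the tests toward: let HyperparameterTuner.fit own the blocking behavior through its wait parameter instead of sleeping and calling tuner.wait() by hand. The estimator image, role, metric, ranges, and S3 path below are illustrative placeholders rather than values from this repository, and the sketch assumes a SageMaker Python SDK version whose HyperparameterTuner.fit accepts wait, which this change relies on.

    from sagemaker.estimator import Estimator
    from sagemaker.tuner import ContinuousParameter, HyperparameterTuner

    # Placeholder estimator; any SageMaker estimator is driven the same way here.
    estimator = Estimator(
        image_uri="<training-image-uri>",
        role="<execution-role-arn>",
        instance_count=1,
        instance_type="ml.m5.xlarge",
    )

    tuner = HyperparameterTuner(
        estimator=estimator,
        objective_metric_name="validation:accuracy",
        hyperparameter_ranges={"learning_rate": ContinuousParameter(0.01, 0.2)},
        metric_definitions=[{"Name": "validation:accuracy", "Regex": "accuracy=([0-9\\.]+)"}],
        max_jobs=2,
        max_parallel_jobs=2,
    )

    job_name = "example-tuning-job"
    print("Started hyperparameter tuning job with name: {}".format(job_name))

    # wait=True (the default) blocks until the tuning job reaches a terminal state;
    # wait=False returns immediately, and tuner.wait() can still be called later.
    tuner.fit({"train": "s3://<bucket>/<prefix>/train"}, job_name=job_name, wait=False)
    tuner.wait()

Pushing the blocking decision into fit is what lets the tests drop the fixed time.sleep(15) polling and keeps the non-blocking cases, such as the stop-tuning-job test, explicit with wait=False.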