Skip to content

Commit 3bfdea9

Browse files
authored
Drop check of monai.utils.get_torch_version_tuple() >= (1, 6) (Project-MONAI#1072)
Fixes Project-MONAI#509. ### Description: Drop the check `monai.utils.get_torch_version_tuple() >= (1, 6)` because the core codebase now requires at least torch 1.6. See also [here](Project-MONAI/MONAI#3353). ### Checks: - [x] Notebook runs automatically `./runner [-p <regex_pattern>]`
1 parent a971dce commit 3bfdea9

File tree

8 files changed

+5
-23
lines changed

8 files changed

+5
-23
lines changed

acceleration/distributed_training/unet_evaluation_workflows.py

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -173,8 +173,7 @@ def evaluate(args):
173173
},
174174
additional_metrics={"val_acc": Accuracy(output_transform=from_engine(["pred", "label"]), device=device)},
175175
val_handlers=val_handlers,
176-
# if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP evaluation
177-
amp=True if monai.utils.get_torch_version_tuple() >= (1, 6) else False,
176+
amp=True,
178177
)
179178
evaluator.run()
180179
dist.destroy_process_group()

acceleration/distributed_training/unet_training_workflows.py

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -177,8 +177,7 @@ def train(args):
177177
optimizer=opt,
178178
loss_function=loss,
179179
inferer=SimpleInferer(),
180-
# if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP evaluation
181-
amp=True if monai.utils.get_torch_version_tuple() >= (1, 6) else False,
180+
amp=True,
182181
postprocessing=train_post_transforms,
183182
key_train_metric={"train_acc": Accuracy(output_transform=from_engine(["pred", "label"]), device=device)},
184183
train_handlers=train_handlers,

modules/engines/unet_evaluation_dict.py

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -112,8 +112,7 @@ def main(tempdir):
112112
},
113113
additional_metrics={"val_acc": Accuracy(output_transform=from_engine(["pred", "label"]))},
114114
val_handlers=val_handlers,
115-
# if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP evaluation
116-
amp=True if monai.utils.get_torch_version_tuple() >= (1, 6) else False,
115+
amp=True,
117116
)
118117
evaluator.run()
119118

modules/engines/unet_training_dict.py

Lines changed: 2 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -147,8 +147,7 @@ def main(tempdir):
147147
},
148148
additional_metrics={"val_acc": Accuracy(output_transform=from_engine(["pred", "label"]))},
149149
val_handlers=val_handlers,
150-
# if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP evaluation
151-
amp=True if monai.utils.get_torch_version_tuple() >= (1, 6) else False,
150+
amp=True,
152151
)
153152

154153
train_post_transforms = Compose(
@@ -182,7 +181,7 @@ def main(tempdir):
182181
key_train_metric={"train_acc": Accuracy(output_transform=from_engine(["pred", "label"]))},
183182
train_handlers=train_handlers,
184183
# if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP training
185-
amp=True if monai.utils.get_torch_version_tuple() >= (1, 6) else False,
184+
amp=True,
186185
)
187186
# set initialized trainer for "early stop" handlers
188187
val_handlers[0].set_trainer(trainer=trainer)

pathology/tumor_detection/ignite/camelyon_train_evaluate.py

Lines changed: 0 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -184,12 +184,6 @@ def train(cfg):
184184
else:
185185
optimizer = SGD(model.parameters(), lr=cfg["lr"], momentum=0.9)
186186

187-
# AMP scaler
188-
if cfg["amp"]:
189-
cfg["amp"] = True if monai.utils.get_torch_version_tuple() >= (1, 6) else False
190-
else:
191-
cfg["amp"] = False
192-
193187
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg["n_epochs"])
194188

195189
# --------------------------------------------

pathology/tumor_detection/ignite/camelyon_train_evaluate_nvtx_profiling.py

Lines changed: 0 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -187,12 +187,6 @@ def train(cfg):
187187
else:
188188
optimizer = SGD(model.parameters(), lr=cfg["lr"], momentum=0.9)
189189

190-
# AMP scaler
191-
if cfg["amp"]:
192-
cfg["amp"] = True if monai.utils.get_torch_version_tuple() >= (1, 6) else False
193-
else:
194-
cfg["amp"] = False
195-
196190
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg["n_epochs"])
197191

198192
# --------------------------------------------

pathology/tumor_detection/torch/camelyon_train_evaluate_pytorch_gpu.py

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -375,7 +375,6 @@ def main(cfg):
375375
optimizer = SGD(model.parameters(), lr=cfg["lr"], momentum=0.9)
376376

377377
# AMP scaler
378-
cfg["amp"] = cfg["amp"] and monai.utils.get_torch_version_tuple() >= (1, 6)
379378
if cfg["amp"] is True:
380379
scaler = GradScaler()
381380
else:

performance_profiling/pathology/train_evaluate_nvtx.py

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -382,7 +382,6 @@ def main(cfg):
382382
optimizer = SGD(model.parameters(), lr=cfg["lr"], momentum=0.9)
383383

384384
# AMP scaler
385-
cfg["amp"] = cfg["amp"] and monai.utils.get_torch_version_tuple() >= (1, 6)
386385
if cfg["amp"] is True:
387386
scaler = GradScaler()
388387
else:

0 commit comments

Comments (0)