update usage of deprecated automatic_optimization (#5011)
* drop deprecated usage of automatic_optimization
* Apply suggestions from code review

Co-authored-by: Adrian Wälchli <aedu.waelchli@gmail.com>
Co-authored-by: Rohit Gupta <rohitgr1998@gmail.com>
This commit is contained in:
parent
77fb425dd4
commit
4ebce38478
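The change replaces the deprecated `Trainer(automatic_optimization=...)` argument with the `automatic_optimization` property on the `LightningModule`. A minimal before/after sketch, assuming the PyTorch Lightning API around this release (`optimizers()`, `manual_backward`, `enable_pl_optimizer`); the toy model and dataloader are illustrative and not part of the commit:

    import torch
    from torch.utils.data import DataLoader, TensorDataset
    from pytorch_lightning import LightningModule, Trainer

    # deprecated style, removed by this commit:
    #   trainer = Trainer(automatic_optimization=False, ...)

    class ManualOptModel(LightningModule):
        def __init__(self):
            super().__init__()
            self.layer = torch.nn.Linear(32, 2)

        @property
        def automatic_optimization(self) -> bool:
            # new style: the model, not the Trainer, declares manual optimization
            return False

        def training_step(self, batch, batch_idx):
            opt = self.optimizers()
            loss = self.layer(batch[0]).sum()
            self.manual_backward(loss, opt)
            opt.step()
            opt.zero_grad()
            return loss

        def configure_optimizers(self):
            return torch.optim.SGD(self.layer.parameters(), lr=0.1)

        def train_dataloader(self):
            return DataLoader(TensorDataset(torch.randn(64, 32)), batch_size=2)

    # no automatic_optimization argument is passed to the Trainer anymore
    trainer = Trainer(fast_dev_run=True, enable_pl_optimizer=True)
    trainer.fit(ManualOptModel())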
@@ -1412,8 +1412,10 @@ class LightningModule(
     def _verify_is_manual_optimization(self, fn_name):
         if self.trainer.train_loop.automatic_optimization:
-            m = f'to use {fn_name}, please disable automatic optimization: Trainer(automatic_optimization=False)'
-            raise MisconfigurationException(m)
+            raise MisconfigurationException(
+                f'to use {fn_name}, please disable automatic optimization:'
+                ' set model property `automatic_optimization` as False'
+            )

     @classmethod
     def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]:
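The reworded guard now points at the model property rather than the removed Trainer flag. A hedged sketch of how it surfaces, assuming `manual_backward` is routed through `_verify_is_manual_optimization` as in the Lightning source of this period; the toy model is illustrative:

    import torch
    from torch.utils.data import DataLoader, TensorDataset
    from pytorch_lightning import LightningModule, Trainer
    from pytorch_lightning.utilities.exceptions import MisconfigurationException

    class AutoOptModel(LightningModule):
        # automatic_optimization is left at its default of True
        def __init__(self):
            super().__init__()
            self.layer = torch.nn.Linear(32, 2)

        def training_step(self, batch, batch_idx):
            loss = self.layer(batch[0]).sum()
            # manual_backward while automatic optimization is still enabled trips the guard
            self.manual_backward(loss, self.optimizers())
            return loss

        def configure_optimizers(self):
            return torch.optim.SGD(self.layer.parameters(), lr=0.1)

        def train_dataloader(self):
            return DataLoader(TensorDataset(torch.randn(64, 32)), batch_size=2)

    try:
        Trainer(fast_dev_run=True).fit(AutoOptModel())
    except MisconfigurationException as err:
        print(err)  # "... set model property `automatic_optimization` as False"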
@@ -79,7 +79,7 @@ class ConfigValidator(object):
         if trainer.overriden_optimizer_step and not enable_pl_optimizer and automatic_optimization:
             rank_zero_warn(
                 "When overriding `LightningModule` optimizer_step with"
-                " `Trainer(..., enable_pl_optimizer=False, automatic_optimization=True, ...)`,"
+                " `Trainer(..., enable_pl_optimizer=False, ...)`,"
                 " we won't be calling `.zero_grad` we can't assume when you call your `optimizer.step()`."
                 " For Lightning to take care of it, please use `Trainer(enable_pl_optimizer=True)`."
             )
@@ -89,15 +89,16 @@ class ConfigValidator(object):
         has_overriden_optimization_functions = trainer.overriden_optimizer_step or trainer.overriden_optimizer_zero_grad
         if (has_overriden_optimization_functions) and going_to_accumulate_grad_batches and automatic_optimization:
             raise MisconfigurationException(
-                'When overriding `LightningModule` optimizer_step or optimizer_zero_grad with '
-                '`Trainer(automatic_optimization=True, ...)`, `accumulate_grad_batches` should to be 1.'
+                'When overriding `LightningModule` optimizer_step or optimizer_zero_grad'
+                ' , `accumulate_grad_batches` in `Trainer` should to be 1.'
                 ' It ensures optimizer_step or optimizer_zero_grad are called on every batch.'
             )

         if (enable_pl_optimizer) and trainer.overriden_optimizer_zero_grad and not automatic_optimization:
             raise MisconfigurationException(
-                'When overriding `LightningModule` optimizer_zero_grad with '
-                '`Trainer(automatic_optimization=False, enable_pl_optimizer=True, ...) is not supported'
+                'When overriding `LightningModule` optimizer_zero_grad'
+                ' and preserving model property `automatic_optimization` as True with'
+                ' `Trainer(enable_pl_optimizer=True, ...) is not supported'
             )

     def __verify_eval_loop_configuration(self, model, eval_loop_name):
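The second check now describes the unsupported combination in terms of the model property; the assertion updated at the bottom of this diff tests exactly this wording. A hedged sketch of a configuration that trips it, mirroring `test_step_with_misconfiguraiton_error_when_overriding_optimizer_zero_grad` below (toy model, illustrative names):

    import torch
    from torch.utils.data import DataLoader, TensorDataset
    from pytorch_lightning import LightningModule, Trainer
    from pytorch_lightning.utilities.exceptions import MisconfigurationException

    class ZeroGradOverrideModel(LightningModule):
        def __init__(self):
            super().__init__()
            self.layer = torch.nn.Linear(32, 2)

        @property
        def automatic_optimization(self) -> bool:
            return False  # manual optimization

        def optimizer_zero_grad(self, *_):
            pass  # user takes over zero_grad, as in the test below

        def training_step(self, batch, batch_idx):
            opt = self.optimizers()
            loss = self.layer(batch[0]).sum()
            self.manual_backward(loss, opt)
            opt.step()
            return loss

        def configure_optimizers(self):
            return torch.optim.SGD(self.layer.parameters(), lr=0.1)

        def train_dataloader(self):
            return DataLoader(TensorDataset(torch.randn(64, 32)), batch_size=2)

    try:
        Trainer(fast_dev_run=True, enable_pl_optimizer=True).fit(ZeroGradOverrideModel())
    except MisconfigurationException as err:
        assert "`Trainer(enable_pl_optimizer=True, ...) is not supported" in str(err)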
@@ -358,7 +358,7 @@ class Trainer(
         )

         # init train loop related flags
-        # TODO: deprecate in 1.2.0
+        # TODO: remove in 1.3.0
         if automatic_optimization is None:
             automatic_optimization = True
         else:
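Only the TODO target changes here (removal in 1.3.0 instead of deprecation in 1.2.0). The surrounding code relies on the usual sentinel-default idiom: `None` means the deprecated argument was not passed, so only explicit uses need special handling. A hypothetical sketch of that idiom, not the Trainer's actual code:

    def resolve_automatic_optimization(flag):
        # None is the sentinel for "the deprecated Trainer argument was not passed"
        if flag is None:
            return True  # default behaviour: automatic optimization stays enabled
        # an explicit value keeps working until removal; real code would warn here
        return bool(flag)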
@@ -38,7 +38,6 @@ def test_automatic_optimization(tmpdir):
         default_root_dir=tmpdir,
         limit_train_batches=2,
         accumulate_grad_batches=2,
-        automatic_optimization=True
     )

     trainer.fit(model)
@@ -90,7 +89,6 @@ def test_automatic_optimization_num_calls(enable_pl_optimizer, tmpdir):
         default_root_dir=tmpdir,
         limit_train_batches=8,
         accumulate_grad_batches=1,
-        automatic_optimization=True,
         enable_pl_optimizer=enable_pl_optimizer
     )

@@ -112,6 +112,10 @@ def test_lightning_optimizer_manual_optimization(mock_sgd_step, mock_adam_step,
             lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
             return [optimizer_1, optimizer_2], [lr_scheduler]

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.training_step_end = None
     model.training_epoch_end = None
@@ -121,8 +125,8 @@
         limit_val_batches=1,
         max_epochs=1,
         weights_summary=None,
-        automatic_optimization=False,
-        enable_pl_optimizer=True)
+        enable_pl_optimizer=True,
+    )
     trainer.fit(model)

     assert len(mock_sgd_step.mock_calls) == 2
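Every test change below repeats the same two-step migration: drop `automatic_optimization=...` from the `Trainer(...)` call and declare the mode on the test model instead. A condensed sketch of the resulting pattern; `BoringModel` and `tmpdir` come from the Lightning test suite, and the manual `training_step` bodies and exact Trainer arguments vary per test:

    import torch
    from pytorch_lightning import Trainer

    class TestModel(BoringModel):  # BoringModel is the test-suite base model
        def configure_optimizers(self):
            optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
            optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
            return optimizer, optimizer_2

        @property
        def automatic_optimization(self) -> bool:
            # the model now owns this decision; the Trainer flag is gone
            return False

    trainer = Trainer(
        default_root_dir=tmpdir,  # tmpdir is the pytest fixture used by these tests
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=1,
        weights_summary=None,
        enable_pl_optimizer=True,
    )
    trainer.fit(TestModel())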
@@ -161,6 +165,10 @@ def test_lightning_optimizer_manual_optimization_and_accumulated_gradients(mock_
             lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
             return [optimizer_1, optimizer_2], [lr_scheduler]

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.training_step_end = None
     model.training_epoch_end = None
@@ -170,7 +178,6 @@
         limit_val_batches=1,
         max_epochs=1,
         weights_summary=None,
-        automatic_optimization=False,
         accumulate_grad_batches=2,
         enable_pl_optimizer=True,
     )
@@ -237,7 +244,6 @@ def test_lightning_optimizer_automatic_optimization(tmpdir):
         max_epochs=1,
         weights_summary=None,
         enable_pl_optimizer=True,
-        automatic_optimization=True
     )
     trainer.fit(model)

@@ -291,7 +297,6 @@ def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad(tmpdir):
         max_epochs=1,
         weights_summary=None,
         enable_pl_optimizer=True,
-        automatic_optimization=True
     )
     trainer.fit(model)

@@ -352,7 +357,6 @@ def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad_make_opt
         max_epochs=1,
         weights_summary=None,
         enable_pl_optimizer=True,
-        automatic_optimization=True
     )
     trainer.fit(model)

@@ -406,7 +410,6 @@ def test_lightning_optimizer_automatic_optimization_make_optimizer_step_2(tmpdir
         max_epochs=1,
         weights_summary=None,
         enable_pl_optimizer=True,
-        automatic_optimization=True,
     )
     trainer.fit(model)

@@ -97,11 +97,14 @@ def test_multiple_optimizers_manual(tmpdir):
             optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
             return optimizer, optimizer_2

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.val_dataloader = None

     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=2,
         limit_val_batches=2,
@@ -69,12 +69,15 @@ def test_multiple_optimizers_manual(tmpdir):
             optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
             return optimizer, optimizer_2

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.val_dataloader = None

     limit_train_batches = 2
     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=limit_train_batches,
         limit_val_batches=2,
@@ -133,12 +136,15 @@ def test_multiple_optimizers_manual_return(tmpdir):
             optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
             return optimizer, optimizer_2

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.val_dataloader = None

     limit_train_batches = 2
     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=limit_train_batches,
         limit_val_batches=2,
@@ -198,12 +204,15 @@ def test_multiple_optimizers_manual_return_and_log(tmpdir):
             optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
             return optimizer, optimizer_2

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.val_dataloader = None

     limit_train_batches = 2
     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=limit_train_batches,
         limit_val_batches=2,
@@ -265,12 +274,15 @@ def test_multiple_optimizers_manual_native_amp(tmpdir):
             optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
             return optimizer, optimizer_2

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.val_dataloader = None

     limit_train_batches = 2
     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=limit_train_batches,
         limit_val_batches=2,
@@ -278,7 +290,7 @@ def test_multiple_optimizers_manual_native_amp(tmpdir):
         log_every_n_steps=1,
         weights_summary=None,
         precision=16,
-        gpus=1
+        gpus=1,
     )

     trainer.fit(model)
@@ -335,12 +347,15 @@ def test_multiple_optimizers_manual_apex(tmpdir):
             optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
             return optimizer, optimizer_2

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.val_dataloader = None

     limit_train_batches = 2
     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=limit_train_batches,
         limit_val_batches=2,
@@ -412,6 +427,10 @@ class ManualOptimizationExtendedModel(BoringModel):
         assert self.called["on_train_batch_start"] == 10
         assert self.called["on_train_batch_end"] == 10

+    @property
+    def automatic_optimization(self) -> bool:
+        return False
+

 @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
 @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@@ -431,7 +450,6 @@ def test_manual_optimization_and_return_tensor(tmpdir):
         limit_train_batches=10,
         limit_test_batches=0,
         limit_val_batches=0,
-        automatic_optimization=False,
         precision=16,
         amp_backend='native',
         accelerator="ddp_spawn",
@@ -461,7 +479,6 @@ def test_manual_optimization_and_return_detached_tensor(tmpdir):
         limit_train_batches=10,
         limit_test_batches=0,
         limit_val_batches=0,
-        automatic_optimization=False,
         precision=16,
         amp_backend='native',
         accelerator="ddp_spawn",
@@ -538,6 +555,10 @@ def test_manual_optimization_and_accumulated_gradient(tmpdir):
             assert self.called["on_train_batch_start"] == 20
             assert self.called["on_train_batch_end"] == 20

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = ExtendedModel()
     model.training_step_end = None
     model.training_epoch_end = None
@@ -548,7 +569,6 @@
         limit_train_batches=20,
         limit_test_batches=0,
         limit_val_batches=0,
-        automatic_optimization=False,
         precision=16,
         amp_backend='native',
         accumulate_grad_batches=4,
@@ -610,12 +630,15 @@ def test_multiple_optimizers_step(tmpdir):
             optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
             return optimizer, optimizer_2

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.val_dataloader = None

     limit_train_batches = 2
     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=limit_train_batches,
         limit_val_batches=2,
@@ -692,13 +715,16 @@ def test_step_with_optimizer_closure(tmpdir):
             optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
             return optimizer

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.val_dataloader = None
     model.training_epoch_end = None

     limit_train_batches = 2
     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=limit_train_batches,
         limit_val_batches=2,
@@ -753,13 +779,16 @@ def test_step_with_optimizer_closure_and_accumulated_grad(tmpdir):
             optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
             return optimizer

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.val_dataloader = None
     model.training_epoch_end = None

     limit_train_batches = 4
     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=limit_train_batches,
         limit_val_batches=2,
@@ -806,13 +835,16 @@ def test_step_with_optimizer_closure_and_extra_arguments(step_mock, tmpdir):
             optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
             return optimizer

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.val_dataloader = None
     model.training_epoch_end = None

     limit_train_batches = 4
     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=limit_train_batches,
         limit_val_batches=2,
@@ -881,13 +913,16 @@ def test_step_with_optimizer_closure_with_different_frequencies(mock_sgd_step, m
             optimizer_dis = torch.optim.Adam(self.layer.parameters(), lr=0.001)
             return [optimizer_gen, optimizer_dis]

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.val_dataloader = None
     model.training_epoch_end = None

     limit_train_batches = 8
     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=limit_train_batches,
         limit_val_batches=2,
@@ -985,6 +1020,10 @@ def test_step_with_optimizer_closure_with_different_frequencies_ddp(mock_sgd_ste
             optimizer_dis = torch.optim.Adam(self.layer.parameters(), lr=0.001)
             return [optimizer_gen, optimizer_dis]

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     seed_everything(42)

     model = TestModel()
@@ -993,7 +1032,6 @@

     limit_train_batches = 8
     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=limit_train_batches,
         limit_val_batches=2,
@@ -1023,13 +1061,16 @@ def test_step_with_misconfiguraiton_error_when_overriding_optimizer_zero_grad(tm
         def optimizer_zero_grad(self, *_):
             pass

+        @property
+        def automatic_optimization(self) -> bool:
+            return False
+
     model = TestModel()
     model.val_dataloader = None
     model.training_epoch_end = None

     limit_train_batches = 8
     trainer = Trainer(
-        automatic_optimization=False,
         default_root_dir=tmpdir,
         limit_train_batches=limit_train_batches,
         limit_val_batches=2,
@@ -1039,4 +1080,4 @@
             enable_pl_optimizer=True,
         )
     except MisconfigurationException as e:
-        assert "`Trainer(automatic_optimization=False, enable_pl_optimizer=True, ...) is not supported" in str(e)
+        assert "`Trainer(enable_pl_optimizer=True, ...) is not supported" in str(e)