prune unused methods (#5860)
This commit is contained in:
parent 7f8fdda9a2
commit 26bc754cc1
Makefile (+1)
@@ -9,6 +9,7 @@ clean:
	# clean all temp runs
	rm -rf $(shell find . -name "mlruns")
	rm -rf $(shell find . -name "lightning_log")
	rm -rf $(shell find . -name "lightning_logs")
	rm -rf _ckpt_*
	rm -rf .mypy_cache
	rm -rf .pytest_cache
@@ -41,16 +41,6 @@ class ConfigureOptimizersPool(ABC):
        optimizer = optim.Adagrad(self.parameters(), lr=self.learning_rate)
        return optimizer

    def configure_optimizers__multiple_optimizers(self):
        """
        return whatever optimizers we want here.
        :return: list of optimizers
        """
        # try no scheduler for this model (testing purposes)
        optimizer1 = optim.Adam(self.parameters(), lr=self.learning_rate)
        optimizer2 = optim.Adam(self.parameters(), lr=self.learning_rate)
        return optimizer1, optimizer2

    def configure_optimizers__multiple_optimizers_frequency(self):
        optimizer1 = optim.Adam(self.parameters(), lr=self.learning_rate)
        optimizer2 = optim.Adam(self.parameters(), lr=self.learning_rate)
@@ -70,11 +60,6 @@ class ConfigureOptimizersPool(ABC):
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.1)
        return [optimizer], [lr_scheduler]

    def configure_optimizers__onecycle_scheduler(self):
        optimizer = optim.SGD(self.parameters(), lr=self.learning_rate, momentum=0.9)
        lr_scheduler = optim.lr_scheduler.OneCycleLR(optimizer, max_lr=self.learning_rate, total_steps=10_000)
        return [optimizer], [lr_scheduler]

    def configure_optimizers__multiple_schedulers(self):
        optimizer1 = optim.Adam(self.parameters(), lr=self.learning_rate)
        optimizer2 = optim.Adam(self.parameters(), lr=self.learning_rate)
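
For context only, and not part of the diff: the configure_optimizers__multiple_optimizers_frequency variation above is cut off at the hunk boundary. Below is a minimal, self-contained sketch of how such a variation typically ends, assuming Lightning's convention that configure_optimizers may return a list of dicts with 'optimizer' and 'frequency' keys; the module, layer sizes, and frequency values are illustrative placeholders, not the repository's code.

import torch.nn as nn
from torch import optim

class _FrequencyOptimizersSketch(nn.Module):
    # hypothetical stand-in for the test template; names and sizes are illustrative
    def __init__(self, learning_rate=0.001):
        super().__init__()
        self.layer = nn.Linear(32, 2)
        self.learning_rate = learning_rate

    def configure_optimizers(self):
        optimizer1 = optim.Adam(self.parameters(), lr=self.learning_rate)
        optimizer2 = optim.Adam(self.parameters(), lr=self.learning_rate)
        # alternate optimizers by batch count: optimizer1 for 1 batch, then optimizer2 for 5
        return [
            {'optimizer': optimizer1, 'frequency': 1},
            {'optimizer': optimizer2, 'frequency': 5},
        ]

With a return value of this shape, Lightning alternates between the two optimizers according to the frequency counts.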
@@ -35,9 +35,3 @@ class TestDataloaderVariations(ABC):
        lengths = [50, 30, 40]
        dataloaders = [self.dataloader(train=False, num_samples=n) for n in lengths]
        return dataloaders

    def test_dataloader__empty(self):
        return None

    def test_dataloader__multiple(self):
        return [self.dataloader(train=False), self.dataloader(train=False)]
@@ -43,9 +43,3 @@ class TrainDataloaderVariations(ABC):
            'a': self.dataloader(train=True, num_samples=100),
            'b': self.dataloader(train=True, num_samples=50),
        }

    def train_dataloader__multiple_sequence(self):
        return [
            self.dataloader(train=True, num_samples=100),
            self.dataloader(train=True, num_samples=50),
        ]
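
Illustrative sketch, not from the diff: when train_dataloader returns a mapping of loaders like the 'a'/'b' variation above, the batch handed to the training step is assumed to mirror those keys. The model, loss, and unpacking below are placeholder assumptions, not the repository's code.

import torch.nn.functional as F

def training_step_for_mapping(model, batch, batch_idx):
    # batch is assumed to arrive as {'a': (x_a, y_a), 'b': (x_b, y_b)},
    # one sub-batch per loader in the mapping returned by train_dataloader
    x_a, y_a = batch['a']
    x_b, y_b = batch['b']
    loss_a = F.cross_entropy(model(x_a), y_a)
    loss_b = F.cross_entropy(model(x_b), y_b)
    return loss_a + loss_b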
@@ -62,72 +62,6 @@ class TrainingStepVariations(ABC):
            output /= 0
        return output

    def training_step_end_full_loop_result_obj_dp(self, result):
        """
        Full loop flow train step (result obj + dp)
        """
        result.minimize = result.minimize.mean()
        result.checkpoint_on = result.checkpoint_on.mean()
        result.train_step_metric = result.train_step_metric.mean()
        result.log('train_step_end_metric', 1)
        self.training_step_end_called = True
        return result

    def training_epoch_end_full_loop_result_obj_dp(self, result):
        """
        Full loop flow train step (result obj + dp)
        """
        result.log('train_epoch_end_metric', 1, on_epoch=True)
        self.training_epoch_end_called = True

        return result

    def eval_step_end_full_loop_result_obj_dp(self, result):
        """
        Full loop flow train step (result obj + dp)
        """
        eval_name = 'validation' if not self.trainer.testing else 'test'
        reduced = getattr(result, f'{eval_name}_step_metric_step').mean()
        setattr(result, f'{eval_name}_step_metric_step', reduced)

        reduced = getattr(result, f'{eval_name}_step_metric_epoch').mean()
        setattr(result, f'{eval_name}_step_metric_epoch', reduced)

        reduced = getattr(result, f'{eval_name}_step_metric').mean()
        setattr(result, f'{eval_name}_step_metric', reduced)

        result.checkpoint_on = result.checkpoint_on.mean()
        result.early_stop_on = result.early_stop_on.mean()
        result.log(f'{eval_name}_step_end_metric', torch.tensor(1).type_as(result.checkpoint_on))
        setattr(self, f'{eval_name}_step_end_called', True)

        return result

    def eval_epoch_end_full_loop_result_obj_dp(self, result):
        """
        Full loop flow train step (result obj + dp)
        """
        eval_name = 'validation' if not self.trainer.testing else 'test'
        result.log(f'{eval_name}_epoch_end_metric', torch.tensor(1).type_as(result.checkpoint_on), on_epoch=True)
        result.checkpoint_on = result.checkpoint_on.mean()
        result.early_stop_on = result.early_stop_on.mean()
        setattr(self, f'{eval_name}_epoch_end_called', True)

        # reduce the parametrized values
        reduced = getattr(result, f'{eval_name}_step_metric_step').mean()
        setattr(result, f'{eval_name}_step_metric_step', reduced)

        reduced = getattr(result, f'{eval_name}_step_metric_epoch').mean()
        setattr(result, f'{eval_name}_step_metric_epoch', reduced)

        reduced = getattr(result, f'{eval_name}_step_end_metric').mean()
        setattr(result, f'{eval_name}_step_end_metric', reduced)

        reduced = getattr(result, f'{eval_name}_step_metric').mean()
        setattr(result, f'{eval_name}_step_metric', reduced)

        return result

    def training_step__multiple_dataloaders(self, batch, batch_idx, optimizer_idx=None):
        """Training step for multiple train loaders"""
@@ -21,30 +21,6 @@ class ValidationEpochEndVariations(ABC):
    Houses all variations of validation_epoch_end steps
    """

    def validation_epoch_end_no_monitor(self, outputs):
        """
        Called at the end of validation to aggregate outputs

        Args:
            outputs: list of individual outputs of each validation step
        """

        # if returned a scalar from validation_step, outputs is a list of tensor scalars
        # we return just the average in this case (if we want)
        def _mean(res, key):
            # recursive mean for multilevel dicts
            return torch.stack([x[key] if isinstance(x, dict) else _mean(x, key) for x in res]).mean()

        val_acc_mean = _mean(outputs, 'val_acc')

        # alternate between tensor and scalar
        if self.current_epoch % 2 == 0:
            val_acc_mean = val_acc_mean.item()

        metrics_dict = {'val_acc': val_acc_mean}
        results = {'progress_bar': metrics_dict, 'log': metrics_dict}
        return results

    def validation_epoch_end(self, outputs):
        """
        Called at the end of validation to aggregate outputs
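
For illustration, not part of the diff: the recursive _mean helper shown above averages a metric across step outputs that can be either a flat list of dicts or a nested list per dataloader. A small self-contained example of its behaviour:

import torch

def _mean(res, key):
    # recursive mean for multilevel dicts (same logic as the helper above)
    return torch.stack([x[key] if isinstance(x, dict) else _mean(x, key) for x in res]).mean()

# flat outputs: one dict per validation step
flat = [{'val_acc': torch.tensor(0.8)}, {'val_acc': torch.tensor(0.6)}]
print(_mean(flat, 'val_acc'))  # tensor(0.7000)

# nested outputs: one list per dataloader, each holding per-step dicts
nested = [
    [{'val_acc': torch.tensor(1.0)}, {'val_acc': torch.tensor(0.5)}],
    [{'val_acc': torch.tensor(0.0)}],
]
print(_mean(nested, 'val_acc'))  # tensor(0.3750), the mean of per-dataloader means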
@@ -143,7 +143,7 @@ def test_multiple_test_dataloader(tmpdir, ckpt_path):
    class MultipleTestDataloaderModel(EvalModelTemplate):

        def test_dataloader(self):
-            return model_template.test_dataloader__multiple()
+            return [self.dataloader(train=False), self.dataloader(train=False)]

        def test_step(self, batch, batch_idx, *args, **kwargs):
            return model_template.test_step__multiple_dataloaders(batch, batch_idx, *args, **kwargs)