diff --git a/pytorch_lightning/trainer/__init__.py b/pytorch_lightning/trainer/__init__.py
index 0e60d58222..fcdd578fea 100644
--- a/pytorch_lightning/trainer/__init__.py
+++ b/pytorch_lightning/trainer/__init__.py
@@ -771,13 +771,6 @@ Example::
         --env=XLA_USE_BF16=1
         -- python your_trainer_file.py
 
-overfit_pct
-^^^^^^^^^^^
-
-.. warning:: .. deprecated:: 0.8.0.
-
-    Use `overfit_batches`. Will be removed in 0.10.0.
-
 overfit_batches
 ^^^^^^^^^^^^^^^
 Uses this much data of the training set. If nonzero, will use the same training set for validation and testing.
diff --git a/pytorch_lightning/trainer/connectors/debugging_connector.py b/pytorch_lightning/trainer/connectors/debugging_connector.py
index b39e3f1e83..580d13a6df 100644
--- a/pytorch_lightning/trainer/connectors/debugging_connector.py
+++ b/pytorch_lightning/trainer/connectors/debugging_connector.py
@@ -24,7 +24,6 @@ class DebuggingConnector:
 
     def on_init_start(
         self,
-        overfit_pct,
         limit_train_batches,
         limit_val_batches,
         limit_test_batches,
diff --git a/pytorch_lightning/trainer/deprecated_api.py b/pytorch_lightning/trainer/deprecated_api.py
deleted file mode 100644
index 523572098e..0000000000
--- a/pytorch_lightning/trainer/deprecated_api.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Mirroring deprecated API"""
-from abc import ABC
-
-from pytorch_lightning.utilities import rank_zero_warn
-
-
-class TrainerDeprecatedAPITillVer0_11(ABC):
-    flush_logs_every_n_steps: int
-    log_every_n_steps: int
-
-    def __init__(self):
-        super().__init__()  # mixin calls super too
-
-    @property
-    def log_save_interval(self) -> int:
-        """Back compatibility, will be removed in v0.11.0"""
-        rank_zero_warn("Attribute `log_save_interval` is now set by `flush_logs_every_n_steps` since v0.10.0"
-                       " and this method will be removed in v0.11.0", DeprecationWarning)
-        return self.flush_logs_every_n_steps
-
-    @log_save_interval.setter
-    def log_save_interval(self, val: int):
-        """Back compatibility, will be removed in v0.11.0"""
-        rank_zero_warn("Attribute `log_save_interval` is now set by `flush_logs_every_n_steps` since v0.10.0"
-                       " and this method will be removed in v0.11.0", DeprecationWarning)
-        self.flush_logs_every_n_steps = val
-
-    @property
-    def row_log_interval(self) -> int:
-        """Back compatibility, will be removed in v0.10.0"""
-        rank_zero_warn("Attribute `row_log_interval` is now set by `log_every_n_steps` since v0.10.0"
-                       " and this method will be removed in v0.11.0", DeprecationWarning)
-        return self.log_every_n_steps
-
-    @row_log_interval.setter
-    def row_log_interval(self, val: int):
-        """Back compatibility, will be removed in v0.10.0"""
-        rank_zero_warn("Attribute `row_log_interval` is now set by `log_every_n_steps` since v0.10.0"
-                       " and this method will be removed in v0.11.0", DeprecationWarning)
-        self.log_every_n_steps = val
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index aa0a893e1e..bfcfde0d5f 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -29,7 +29,6 @@ from pytorch_lightning.profiler import BaseProfiler
 from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin
 from pytorch_lightning.trainer.configuration_validator import ConfigValidator
 from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
-from pytorch_lightning.trainer.deprecated_api import TrainerDeprecatedAPITillVer0_11
 from pytorch_lightning.trainer.logging import TrainerLoggingMixin
 from pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin
 from pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin
@@ -79,7 +78,6 @@ class Trainer(
     TrainerLoggingMixin,
     TrainerTrainingTricksMixin,
     TrainerDataLoadingMixin,
-    TrainerDeprecatedAPITillVer0_11,
 ):
     def __init__(
         self,
@@ -132,9 +130,6 @@ class Trainer(
         cluster_environment: ClusterEnvironment = None,
         amp_backend: str = 'native',
         amp_level: str = 'O2',
-        overfit_pct: float = None,  # backward compatible, todo: remove in v1.0.0
-        log_save_interval: Optional[int] = None,  # backward compatible, todo: remove in 0.11
-        row_log_interval: Optional[int] = None,  # backward compatible, todo: remove in 0.11
     ):
         r"""
         Customize every aspect of training via flags
@@ -206,11 +201,6 @@ class Trainer(
 
         log_every_n_steps: How often to log within steps (defaults to every 50 steps).
 
-        log_save_interval: How often to flush logs to disk.
-            .. warning:: .. deprecated:: 0.10.0
-
-                Use `flush_logs_every_n_steps` instead. Will remove v0.11.0.
-
         prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.
             Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data
 
@@ -248,11 +238,6 @@ class Trainer(
         resume_from_checkpoint: To resume training from a specific checkpoint pass in the path here.
             This can be a URL.
 
-        row_log_interval: How often to log within steps.
-            .. warning:: .. deprecated:: 0.10.0
-
-                Use `log_every_n_steps` instead. Will remove v0.11.0.
-
         sync_batchnorm: Synchronize batch norm layers between process groups/whole world.
 
         terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the
@@ -278,19 +263,6 @@ class Trainer(
         """
         super().__init__()
 
-        # deprecation warnings
-        if row_log_interval is not None:
-            warnings.warn("Argument `row_log_interval` is deprecated in v0.10, use `log_every_n_steps` instead."
-                          " It will be removed in v0.11.0.", DeprecationWarning)
-            log_every_n_steps = row_log_interval
-
-        if log_save_interval is not None:
-            warnings.warn(
-                "Argument `log_save_interval` is deprecated in v0.10, use `flush_logs_every_n_steps` instead."
-                " It will be removed in v0.11.0.", DeprecationWarning
-            )
-            flush_logs_every_n_steps = log_save_interval
-
         # init connectors
         self.dev_debugger = InternalDebugger(self)
         self.config_validator = ConfigValidator(self)
@@ -378,7 +350,6 @@ class Trainer(
 
         # init debugging flags
         self.debugging_connector.on_init_start(
-            overfit_pct,
             limit_train_batches,
             limit_val_batches,
             limit_test_batches,
diff --git a/tests/callbacks/test_callback_hook_outputs.py b/tests/callbacks/test_callback_hook_outputs.py
index 6ab35ba46b..d98868dc1f 100644
--- a/tests/callbacks/test_callback_hook_outputs.py
+++ b/tests/callbacks/test_callback_hook_outputs.py
@@ -45,7 +45,7 @@ def test_train_step_no_return(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
 
diff --git a/tests/test_deprecated.py b/tests/test_deprecated.py
index 3e8639c912..00f142e79e 100644
--- a/tests/test_deprecated.py
+++ b/tests/test_deprecated.py
@@ -16,29 +16,6 @@ def _soft_unimport_module(str_module):
         del sys.modules[str_module]
 
 
-def test_tbd_remove_in_v0_11_0_trainer():
-    with pytest.deprecated_call(match='will be removed in v0.11.0'):
-        LearningRateLogger()
-
-    with pytest.deprecated_call(match='will be removed in v0.11.0'):
-        trainer = Trainer(row_log_interval=8)
-    assert trainer.log_every_n_steps == 8
-    with pytest.deprecated_call(match='will be removed in v0.11.0'):
-        assert trainer.row_log_interval == 8
-
-    with pytest.deprecated_call(match='will be removed in v0.11.0'):
-        trainer = Trainer(log_save_interval=9)
-    assert trainer.flush_logs_every_n_steps == 9
-    with pytest.deprecated_call(match='will be removed in v0.11.0'):
-        assert trainer.log_save_interval == 9
-
-
-@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
-def test_tbd_remove_in_v0_11_0_trainer_gpu():
-    with pytest.deprecated_call(match='will be removed in v0.11.0'):
-        GpuUsageLogger()
-
-
 class ModelVer0_6(EvalModelTemplate):
 
     # todo: this shall not be needed while evaluate asks for dataloader explicitly
diff --git a/tests/trainer/data_flow/test_train_loop_flow_scalar_1_0.py b/tests/trainer/data_flow/test_train_loop_flow_scalar_1_0.py
index 3823dec33f..b986c832d6 100644
--- a/tests/trainer/data_flow/test_train_loop_flow_scalar_1_0.py
+++ b/tests/trainer/data_flow/test_train_loop_flow_scalar_1_0.py
@@ -209,7 +209,7 @@ def test_train_step_no_return(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
 
diff --git a/tests/trainer/dynamic_args/test_multiple_eval_dataloaders.py b/tests/trainer/dynamic_args/test_multiple_eval_dataloaders.py
index 385c18a50b..356e58e7df 100644
--- a/tests/trainer/dynamic_args/test_multiple_eval_dataloaders.py
+++ b/tests/trainer/dynamic_args/test_multiple_eval_dataloaders.py
@@ -56,7 +56,7 @@ def test_multiple_eval_dataloaders_tuple(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
 
@@ -87,7 +87,7 @@ def test_multiple_eval_dataloaders_list(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
 
@@ -145,7 +145,7 @@ def test_multiple_optimizers_multiple_dataloaders(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
 
diff --git a/tests/trainer/dynamic_args/test_multiple_optimizers.py b/tests/trainer/dynamic_args/test_multiple_optimizers.py
index eb95f2a43a..267f3d03b6 100644
--- a/tests/trainer/dynamic_args/test_multiple_optimizers.py
+++ b/tests/trainer/dynamic_args/test_multiple_optimizers.py
@@ -41,7 +41,7 @@ def test_multiple_optimizers(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
 
diff --git a/tests/trainer/flags/test_overfit_batches.py b/tests/trainer/flags/test_overfit_batches.py
index cf172f469f..d0f9383a36 100644
--- a/tests/trainer/flags/test_overfit_batches.py
+++ b/tests/trainer/flags/test_overfit_batches.py
@@ -29,7 +29,7 @@ def test_overfit_multiple_val_loaders(tmpdir):
         default_root_dir=tmpdir,
         max_epochs=2,
         overfit_batches=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
 
diff --git a/tests/trainer/logging/test_eval_loop_logging_1_0.py b/tests/trainer/logging/test_eval_loop_logging_1_0.py
index 7980285517..622c4ac474 100644
--- a/tests/trainer/logging/test_eval_loop_logging_1_0.py
+++ b/tests/trainer/logging/test_eval_loop_logging_1_0.py
@@ -217,7 +217,7 @@ def test_eval_float_logging(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
     trainer.fit(model)
diff --git a/tests/trainer/logging/test_train_loop_logging_1_0.py b/tests/trainer/logging/test_train_loop_logging_1_0.py
index ab4541489d..9f93264845 100644
--- a/tests/trainer/logging/test_train_loop_logging_1_0.py
+++ b/tests/trainer/logging/test_train_loop_logging_1_0.py
@@ -336,7 +336,7 @@ def test_tbptt_log(tmpdir):
         limit_val_batches=0,
         truncated_bptt_steps=truncated_bptt_steps,
         max_epochs=2,
-        row_log_interval=2,
+        log_every_n_steps=2,
         weights_summary=None,
     )
     trainer.fit(model)
diff --git a/tests/trainer/model_hooks/test_model_hooks.py b/tests/trainer/model_hooks/test_model_hooks.py
index 67167f5129..ba3d049822 100644
--- a/tests/trainer/model_hooks/test_model_hooks.py
+++ b/tests/trainer/model_hooks/test_model_hooks.py
@@ -19,7 +19,7 @@ def test_eval_train_calls(test_train_mock, test_eval_mock, val_train_mock, val_e
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=2,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
 
diff --git a/tests/trainer/warnings/test_flow_warnings.py b/tests/trainer/warnings/test_flow_warnings.py
index 7e5ec5fe93..cd216adb07 100644
--- a/tests/trainer/warnings/test_flow_warnings.py
+++ b/tests/trainer/warnings/test_flow_warnings.py
@@ -23,7 +23,7 @@ def test_no_depre_without_epoch_end(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=2,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
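
Migration note (not part of the diff): downstream code that still passes the removed arguments must switch to the renamed ones, all of which appear in the `Trainer` signature touched above. A minimal sketch with illustrative values:

    from pytorch_lightning import Trainer

    # Before this change (emitted a DeprecationWarning since v0.10.0):
    #   trainer = Trainer(overfit_pct=0.01, log_save_interval=100, row_log_interval=10)

    # After this change, only the renamed arguments are accepted:
    trainer = Trainer(
        overfit_batches=0.01,          # replaces `overfit_pct`; takes a fraction or an int batch count
        flush_logs_every_n_steps=100,  # replaces `log_save_interval`
        log_every_n_steps=10,          # replaces `row_log_interval`
    )

Likewise, with `TrainerDeprecatedAPITillVer0_11` gone, reading or setting `trainer.row_log_interval` / `trainer.log_save_interval` no longer works; use the `trainer.log_every_n_steps` and `trainer.flush_logs_every_n_steps` attributes directly.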