removed deprecated trainer flags (#3969)

* removed deprecated flags

* removed es callback flag
William Falcon 2020-10-07 23:46:21 -04:00 committed by GitHub
parent 6f1a2ce517
commit 1d3c7dc8d6
14 changed files with 11 additions and 125 deletions
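
For downstream users, the removed flags map one-to-one onto their replacements. A minimal migration sketch (values are illustrative; the rest of the Trainer setup is omitted):

    from pytorch_lightning import Trainer

    # before (deprecated, removed by this commit):
    # trainer = Trainer(overfit_pct=0.01, row_log_interval=10, log_save_interval=100)

    # after:
    trainer = Trainer(
        overfit_batches=0.01,           # replaces `overfit_pct`
        log_every_n_steps=10,           # replaces `row_log_interval`
        flush_logs_every_n_steps=100,   # replaces `log_save_interval`
    )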

docs/source/trainer.rst

@@ -771,13 +771,6 @@ Example::
         --env=XLA_USE_BF16=1
         -- python your_trainer_file.py
 
-overfit_pct
-^^^^^^^^^^^
-
-.. warning:: .. deprecated:: 0.8.0.
-
-    Use `overfit_batches`. Will be removed in 0.10.0.
-
 overfit_batches
 ^^^^^^^^^^^^^^^
 Uses this much data of the training set. If nonzero, will use the same training set for validation and testing.
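
The surviving `overfit_batches` flag takes either a float (fraction of the training set) or an int (absolute number of batches), e.g.:

    # overfit on 1% of the training data
    trainer = Trainer(overfit_batches=0.01)

    # or on exactly 10 batches
    trainer = Trainer(overfit_batches=10)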


@@ -24,7 +24,6 @@ class DebuggingConnector:
     def on_init_start(
         self,
-        overfit_pct,
         limit_train_batches,
         limit_val_batches,
         limit_test_batches,


--- a/pytorch_lightning/trainer/deprecated_api.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright The PyTorch Lightning team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Mirroring deprecated API"""
-
-from abc import ABC
-
-from pytorch_lightning.utilities import rank_zero_warn
-
-
-class TrainerDeprecatedAPITillVer0_11(ABC):
-    flush_logs_every_n_steps: int
-    log_every_n_steps: int
-
-    def __init__(self):
-        super().__init__()  # mixin calls super too
-
-    @property
-    def log_save_interval(self) -> int:
-        """Back compatibility, will be removed in v0.11.0"""
-        rank_zero_warn("Attribute `log_save_interval` is now set by `flush_logs_every_n_steps` since v0.10.0"
-                       " and this method will be removed in v0.11.0", DeprecationWarning)
-        return self.flush_logs_every_n_steps
-
-    @log_save_interval.setter
-    def log_save_interval(self, val: int):
-        """Back compatibility, will be removed in v0.11.0"""
-        rank_zero_warn("Attribute `log_save_interval` is now set by `flush_logs_every_n_steps` since v0.10.0"
-                       " and this method will be removed in v0.11.0", DeprecationWarning)
-        self.flush_logs_every_n_steps = val
-
-    @property
-    def row_log_interval(self) -> int:
-        """Back compatibility, will be removed in v0.11.0"""
-        rank_zero_warn("Attribute `row_log_interval` is now set by `log_every_n_steps` since v0.10.0"
-                       " and this method will be removed in v0.11.0", DeprecationWarning)
-        return self.log_every_n_steps
-
-    @row_log_interval.setter
-    def row_log_interval(self, val: int):
-        """Back compatibility, will be removed in v0.11.0"""
-        rank_zero_warn("Attribute `row_log_interval` is now set by `log_every_n_steps` since v0.10.0"
-                       " and this method will be removed in v0.11.0", DeprecationWarning)
-        self.log_every_n_steps = val
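
The deleted mixin is an instance of a general back-compatibility pattern: keep the old attribute name alive as a property pair that warns and forwards to the new name. A self-contained sketch of that pattern, with hypothetical names rather than Lightning's:

    import warnings

    class Config:
        new_interval: int = 50  # the supported attribute

        @property
        def old_interval(self) -> int:
            """Deprecated alias; reads forward to `new_interval`."""
            warnings.warn("`old_interval` is deprecated, use `new_interval`", DeprecationWarning)
            return self.new_interval

        @old_interval.setter
        def old_interval(self, val: int) -> None:
            """Deprecated alias; writes forward to `new_interval`."""
            warnings.warn("`old_interval` is deprecated, use `new_interval`", DeprecationWarning)
            self.new_interval = val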

pytorch_lightning/trainer/trainer.py

@@ -29,7 +29,6 @@ from pytorch_lightning.profiler import BaseProfiler
 from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin
 from pytorch_lightning.trainer.configuration_validator import ConfigValidator
 from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
-from pytorch_lightning.trainer.deprecated_api import TrainerDeprecatedAPITillVer0_11
 from pytorch_lightning.trainer.logging import TrainerLoggingMixin
 from pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin
 from pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin
@@ -79,7 +78,6 @@ class Trainer(
     TrainerLoggingMixin,
     TrainerTrainingTricksMixin,
     TrainerDataLoadingMixin,
-    TrainerDeprecatedAPITillVer0_11,
 ):
     def __init__(
         self,
@@ -132,9 +130,6 @@ class Trainer(
         cluster_environment: ClusterEnvironment = None,
         amp_backend: str = 'native',
         amp_level: str = 'O2',
-        overfit_pct: float = None,  # backward compatible, todo: remove in v1.0.0
-        log_save_interval: Optional[int] = None,  # backward compatible, todo: remove in 0.11
-        row_log_interval: Optional[int] = None,  # backward compatible, todo: remove in 0.11
     ):
         r"""
         Customize every aspect of training via flags
@@ -206,11 +201,6 @@ class Trainer(
             log_every_n_steps: How often to log within steps (defaults to every 50 steps).
 
-            log_save_interval: How often to flush logs to disk.
-
-                .. warning:: .. deprecated:: 0.10.0
-
-                    Use `flush_logs_every_n_steps` instead. Will remove v0.11.0.
 
             prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.
                 Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data
@@ -248,11 +238,6 @@ class Trainer(
             resume_from_checkpoint: To resume training from a specific checkpoint pass in the path here.
                 This can be a URL.
 
-            row_log_interval: How often to log within steps.
-
-                .. warning:: .. deprecated:: 0.10.0
-
-                    Use `log_every_n_steps` instead. Will remove v0.11.0.
 
             sync_batchnorm: Synchronize batch norm layers between process groups/whole world.
 
             terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the
@@ -278,19 +263,6 @@ class Trainer(
         """
         super().__init__()
 
-        # deprecation warnings
-        if row_log_interval is not None:
-            warnings.warn("Argument `row_log_interval` is deprecated in v0.10, use `log_every_n_steps` instead."
-                          " It will be removed in v0.11.0.", DeprecationWarning)
-            log_every_n_steps = row_log_interval
-
-        if log_save_interval is not None:
-            warnings.warn(
-                "Argument `log_save_interval` is deprecated in v0.10, use `flush_logs_every_n_steps` instead."
-                " It will be removed in v0.11.0.", DeprecationWarning
-            )
-            flush_logs_every_n_steps = log_save_interval
-
         # init connectors
         self.dev_debugger = InternalDebugger(self)
         self.config_validator = ConfigValidator(self)
@@ -378,7 +350,6 @@ class Trainer(
         # init debugging flags
         self.debugging_connector.on_init_start(
-            overfit_pct,
             limit_train_batches,
             limit_val_batches,
             limit_test_batches,
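
The removed `__init__` block above is the keyword-argument counterpart of the deprecated-property pattern: accept the old keyword with a `None` sentinel, warn, and forward its value to the new keyword. A sketch under the same hypothetical names as before:

    import warnings
    from typing import Optional

    class TrainerLike:
        def __init__(self, new_interval: int = 50, old_interval: Optional[int] = None):
            # accept the deprecated keyword, warn, forward to the new one
            if old_interval is not None:
                warnings.warn("Argument `old_interval` is deprecated, use `new_interval`",
                              DeprecationWarning)
                new_interval = old_interval
            self.new_interval = new_interval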


@@ -45,7 +45,7 @@ def test_train_step_no_return(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )


@@ -16,29 +16,6 @@ def _soft_unimport_module(str_module):
         del sys.modules[str_module]
 
-def test_tbd_remove_in_v0_11_0_trainer():
-    with pytest.deprecated_call(match='will be removed in v0.11.0'):
-        LearningRateLogger()
-
-    with pytest.deprecated_call(match='will be removed in v0.11.0'):
-        trainer = Trainer(row_log_interval=8)
-        assert trainer.log_every_n_steps == 8
-
-    with pytest.deprecated_call(match='will be removed in v0.11.0'):
-        assert trainer.row_log_interval == 8
-
-    with pytest.deprecated_call(match='will be removed in v0.11.0'):
-        trainer = Trainer(log_save_interval=9)
-        assert trainer.flush_logs_every_n_steps == 9
-
-    with pytest.deprecated_call(match='will be removed in v0.11.0'):
-        assert trainer.log_save_interval == 9
-
-
-@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
-def test_tbd_remove_in_v0_11_0_trainer_gpu():
-    with pytest.deprecated_call(match='will be removed in v0.11.0'):
-        GpuUsageLogger()
-
-
 class ModelVer0_6(EvalModelTemplate):
 
     # todo: this shall not be needed while evaluate asks for dataloader explicitly
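
The removed tests were built on `pytest.deprecated_call`, which fails unless the enclosed code emits a `DeprecationWarning` (or `PendingDeprecationWarning`). The idiom, independent of Lightning:

    import warnings
    import pytest

    def old_api() -> int:
        warnings.warn("old_api will be removed in v2.0", DeprecationWarning)
        return 42

    def test_old_api_warns():
        # passes only if old_api() raises the expected DeprecationWarning
        with pytest.deprecated_call(match='removed in v2.0'):
            assert old_api() == 42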


@@ -209,7 +209,7 @@ def test_train_step_no_return(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )


@@ -56,7 +56,7 @@ def test_multiple_eval_dataloaders_tuple(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
@@ -87,7 +87,7 @@ def test_multiple_eval_dataloaders_list(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
@@ -145,7 +145,7 @@ def test_multiple_optimizers_multiple_dataloaders(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )


@@ -41,7 +41,7 @@ def test_multiple_optimizers(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )


@@ -29,7 +29,7 @@ def test_overfit_multiple_val_loaders(tmpdir):
         default_root_dir=tmpdir,
         max_epochs=2,
         overfit_batches=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )


@@ -217,7 +217,7 @@ def test_eval_float_logging(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=1,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )
     trainer.fit(model)


@@ -336,7 +336,7 @@ def test_tbptt_log(tmpdir):
         limit_val_batches=0,
         truncated_bptt_steps=truncated_bptt_steps,
         max_epochs=2,
-        row_log_interval=2,
+        log_every_n_steps=2,
         weights_summary=None,
     )
     trainer.fit(model)


@@ -19,7 +19,7 @@ def test_eval_train_calls(test_train_mock, test_eval_mock, val_train_mock, val_e
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=2,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )


@@ -23,7 +23,7 @@ def test_no_depre_without_epoch_end(tmpdir):
         limit_train_batches=2,
         limit_val_batches=2,
         max_epochs=2,
-        row_log_interval=1,
+        log_every_n_steps=1,
         weights_summary=None,
     )