Removed `process_position` argument from Trainer Class (#13071)
parent fab2ff35ad
commit 830da2c4a4
@@ -112,6 +112,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

 ### Removed

+- Removed the deprecated `process_position` argument from the `Trainer` constructor ([#13071](https://github.com/PyTorchLightning/pytorch-lightning/pull/13071))
+
+
 - Removed the deprecated `checkpoint_callback` argument from the `Trainer` constructor ([#13027](https://github.com/PyTorchLightning/pytorch-lightning/pull/13027))
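With the constructor argument gone, the supported way to set the bar position is the callback route named in the deprecation message below. A minimal migration sketch (the position value `1` is illustrative):

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks.progress import TQDMProgressBar

    # Before (removed in v1.7):
    #     trainer = Trainer(process_position=1)

    # After: pass process_position to the TQDMProgressBar callback instead
    trainer = Trainer(callbacks=[TQDMProgressBar(process_position=1)])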
@@ -1214,31 +1214,6 @@ Half precision, or mixed precision, is the combined use of 32 and 16 bit floatin

     # turn on 16-bit
     trainer = Trainer(amp_backend="apex", amp_level="O2", precision=16, accelerator="gpu", devices=1)

-process_position
-^^^^^^^^^^^^^^^^
-
-.. warning:: ``process_position`` has been deprecated in v1.5 and will be removed in v1.7.
-    Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``process_position``
-    directly to the Trainer's ``callbacks`` argument instead.
-
-.. raw:: html
-
-    <video width="50%" max-width="400px" controls
-    poster="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/pl_docs/trainer_flags/thumb/process_position.jpg"
-    src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/pl_docs/trainer_flags/process_position.mp4"></video>
-
-|
-
-Orders the progress bar. Useful when running multiple trainers on the same node.
-
-.. testcode::
-
-    # default used by the Trainer
-    trainer = Trainer(process_position=0)
-
-.. note:: This argument is ignored if a custom callback is passed to :paramref:`~Trainer.callbacks`.
-
 profiler
 ^^^^^^^^
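The removed docs section was the one place that explained the flag's purpose: ordering progress bars when several trainers run on the same node. That use case still works through the callback; a hedged sketch (the two-trainer setup is assumed for illustration):

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks.progress import TQDMProgressBar

    # Give each trainer's bar its own terminal row so concurrent runs
    # on the same node do not overwrite each other's output.
    trainer_a = Trainer(callbacks=[TQDMProgressBar(process_position=0)])
    trainer_b = Trainer(callbacks=[TQDMProgressBar(process_position=1)])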
@@ -45,7 +45,6 @@ class CallbackConnector:
         callbacks: Optional[Union[List[Callback], Callback]],
         enable_checkpointing: bool,
         enable_progress_bar: bool,
-        process_position: int,
         default_root_dir: Optional[str],
         weights_save_path: Optional[str],
         enable_model_summary: bool,
@@ -77,14 +76,7 @@ class CallbackConnector:
         self._configure_timer_callback(max_time)

         # init progress bar
-        if process_position != 0:
-            rank_zero_deprecation(
-                f"Setting `Trainer(process_position={process_position})` is deprecated in v1.5 and will be removed"
-                " in v1.7. Please pass `pytorch_lightning.callbacks.progress.TQDMProgressBar` with"
-                " `process_position` directly to the Trainer's `callbacks` argument instead."
-            )
-
-        self._configure_progress_bar(process_position, enable_progress_bar)
+        self._configure_progress_bar(enable_progress_bar)

         # configure the ModelSummary callback
         self._configure_model_summary_callback(enable_model_summary, weights_summary)
@@ -188,7 +180,7 @@ class CallbackConnector:
             self.trainer.callbacks.append(model_summary)
         self.trainer._weights_summary = weights_summary

-    def _configure_progress_bar(self, process_position: int = 0, enable_progress_bar: bool = True) -> None:
+    def _configure_progress_bar(self, enable_progress_bar: bool = True) -> None:
         progress_bars = [c for c in self.trainer.callbacks if isinstance(c, ProgressBarBase)]
         if len(progress_bars) > 1:
             raise MisconfigurationException(
@@ -210,7 +202,7 @@ class CallbackConnector:
             )

         if enable_progress_bar:
-            progress_bar_callback = TQDMProgressBar(process_position=process_position)
+            progress_bar_callback = TQDMProgressBar()
             self.trainer.callbacks.append(progress_bar_callback)

     def _configure_timer_callback(self, max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None) -> None:
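The visible lines of `_configure_progress_bar` show that more than one `ProgressBarBase` in `callbacks` raises a `MisconfigurationException`, and that a default `TQDMProgressBar()` is appended when the bar is enabled. A sketch of the resulting user-facing behavior (assuming, as in the elided middle of the function, that a single user-supplied bar is used as-is):

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks.progress import TQDMProgressBar
    from pytorch_lightning.utilities.exceptions import MisconfigurationException

    # One custom bar: used directly, no default bar is added on top.
    trainer = Trainer(callbacks=[TQDMProgressBar(process_position=2)])

    # Two bar callbacks: rejected while the Trainer is being constructed.
    try:
        Trainer(callbacks=[TQDMProgressBar(), TQDMProgressBar()])
    except MisconfigurationException as err:
        print(err)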
@@ -137,7 +137,6 @@ class Trainer(
         default_root_dir: Optional[str] = None,
         gradient_clip_val: Optional[Union[int, float]] = None,
         gradient_clip_algorithm: Optional[str] = None,
-        process_position: int = 0,
         num_nodes: int = 1,
         num_processes: Optional[int] = None,  # TODO: Remove in 2.0
         devices: Optional[Union[List[int], str, int]] = None,
@@ -305,13 +304,6 @@ class Trainer(
             log_every_n_steps: How often to log within steps.
                 Default: ``50``.

-            process_position: Orders the progress bar when running multiple models on same machine.
-
-                .. deprecated:: v1.5
-                    ``process_position`` has been deprecated in v1.5 and will be removed in v1.7.
-                    Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``process_position``
-                    directly to the Trainer's ``callbacks`` argument instead.
-
             enable_progress_bar: Whether to enable the progress bar by default.
                 Default: ``True``.
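Per the docstring above, `enable_progress_bar` remains the constructor-level switch; only positioning moved to the callback. A one-line sketch:

    from pytorch_lightning import Trainer

    # No progress bar at all; positioning is moot in this case.
    trainer = Trainer(enable_progress_bar=False)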
@@ -508,7 +500,6 @@ class Trainer(
             callbacks,
             enable_checkpointing,
             enable_progress_bar,
-            process_position,
             default_root_dir,
             weights_save_path,
             enable_model_summary,
@@ -61,11 +61,6 @@ def test_v1_7_0_on_interrupt(tmpdir):
     trainer.fit(model)


-def test_v1_7_0_process_position_trainer_constructor(tmpdir):
-    with pytest.deprecated_call(match=r"Setting `Trainer\(process_position=5\)` is deprecated in v1.5"):
-        _ = Trainer(process_position=5)
-
-
 def test_v1_7_0_flush_logs_every_n_steps_trainer_constructor(tmpdir):
     with pytest.deprecated_call(match=r"Setting `Trainer\(flush_logs_every_n_steps=10\)` is deprecated in v1.5"):
         _ = Trainer(flush_logs_every_n_steps=10)
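With the deprecation test deleted, passing the old keyword now fails outright instead of warning. A hedged sketch of what such a check could look like (not a test from this commit):

    import pytest
    from pytorch_lightning import Trainer

    def test_process_position_removed():
        # The keyword no longer exists on Trainer.__init__, so Python
        # raises a TypeError for the unexpected argument.
        with pytest.raises(TypeError):
            Trainer(process_position=5)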