Don't raise DeprecationWarning for `LoggerConnector.gpus_metrics` (#9959)
parent 65150cdb42
commit 203737bfce
@@ -56,16 +56,15 @@ argument of :class:`~pytorch_lightning.trainer.trainer.Trainer`)

----------------

Log GPU usage
-------------
Logs (to a logger) the GPU usage for each GPU on the master machine.

(See: :paramref:`~pytorch_lightning.trainer.trainer.Trainer.log_gpu_memory`
argument of :class:`~pytorch_lightning.trainer.trainer.Trainer`)
Log device stats
----------------
Monitor and log device stats during training with the :class:`~pytorch_lightning.callbacks.device_stats_monitor.DeviceStatsMonitor`.

.. testcode::

    trainer = Trainer(log_gpu_memory=True)
    from pytorch_lightning.callbacks import DeviceStatsMonitor

    trainer = Trainer(callbacks=[DeviceStatsMonitor()])

----------------

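The docs hunk above swaps the `log_gpu_memory` Trainer flag for the `DeviceStatsMonitor` callback. A minimal migration sketch, assuming a standard LightningModule and the default logger (the extra trainer options are placeholders, not part of this diff):

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks import DeviceStatsMonitor

    # Before (deprecated in v1.5): trainer = Trainer(log_gpu_memory=True)
    # After: attach the callback; device stats are sent to the configured logger.
    trainer = Trainer(callbacks=[DeviceStatsMonitor()], max_epochs=1)
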
@@ -322,10 +322,6 @@ class LoggerConnector:

        .. deprecated:: v1.5
            Will be removed in v1.7.
        """
        rank_zero_deprecation(
            "The property `LoggerConnector.gpus_metrics` was deprecated in v1.5"
            " and will be removed in 1.7. Use the `DeviceStatsMonitor` callback instead."
        )
        if self.trainer._device_type == DeviceType.GPU and self.log_gpu_memory:
            mem_map = memory.get_memory_profile(self.log_gpu_memory)
            self._gpus_metrics.update(mem_map)

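The hunk count (10 lines down to 6) suggests the four-line `rank_zero_deprecation(...)` call is what gets removed from the `gpus_metrics` property, in line with the commit title. For context, a generic sketch of a property that emits a DeprecationWarning on access, using only the standard library (an illustration of the pattern, not Lightning's `rank_zero_deprecation` implementation):

    import warnings

    class LegacyMetrics:
        @property
        def gpus_metrics(self) -> dict:
            # Warn on access; stacklevel=2 points the warning at the caller's line.
            warnings.warn(
                "`gpus_metrics` is deprecated; use the `DeviceStatsMonitor` callback instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            return {}
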
@@ -21,7 +21,6 @@ from pytorch_lightning import Callback, LightningDataModule, Trainer

from pytorch_lightning.callbacks.gpu_stats_monitor import GPUStatsMonitor
from pytorch_lightning.callbacks.xla_stats_monitor import XLAStatsMonitor
from pytorch_lightning.loggers import LoggerCollection, TestTubeLogger
from pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector
from tests.deprecated_api import _soft_unimport_module
from tests.helpers import BoringModel
from tests.helpers.datamodules import MNISTDataModule

@@ -375,10 +374,7 @@ def test_v1_7_0_trainer_log_gpu_memory(tmpdir):

    with pytest.deprecated_call(
        match="Setting `log_gpu_memory` with the trainer flag is deprecated in v1.5 and will be removed"
    ):
        trainer = Trainer(log_gpu_memory="min_max")
    with pytest.deprecated_call(match="The property `LoggerConnector.gpus_metrics` was deprecated in v1.5"):
        lg = LoggerConnector(trainer)
        _ = lg.gpus_metrics
        _ = Trainer(log_gpu_memory="min_max")


@RunIf(min_gpus=1)

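The test above asserts deprecation warnings with `pytest.deprecated_call`. A self-contained sketch of the same pattern, assuming nothing beyond pytest and the standard library (the `old_api` function is a made-up stand-in for illustration):

    import warnings

    import pytest

    def old_api() -> int:
        warnings.warn("old_api is deprecated, use new_api", DeprecationWarning)
        return 1

    def test_old_api_warns():
        # `match` is a regex checked against the warning message.
        with pytest.deprecated_call(match="old_api is deprecated"):
            assert old_api() == 1
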
@@ -171,7 +171,6 @@ def test_mlflow_logger_dirs_creation(tmpdir):

        max_epochs=1,
        limit_train_batches=limit_batches,
        limit_val_batches=limit_batches,
        log_gpu_memory=True,
    )
    trainer.fit(model)
    assert set(os.listdir(tmpdir / exp_id)) == {run_id, "meta.yaml"}

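This hunk appears to drop `log_gpu_memory=True` from the Trainer arguments of the MLflow logger test. For reference, a minimal sketch of the kind of setup such a test exercises, with the experiment name and save directory chosen arbitrarily here (assuming the `mlflow` package is installed):

    from pytorch_lightning import Trainer
    from pytorch_lightning.loggers import MLFlowLogger

    # Runs and metadata are written under save_dir/<experiment_id>/<run_id>/.
    logger = MLFlowLogger(experiment_name="demo", save_dir="./mlruns")
    trainer = Trainer(logger=logger, max_epochs=1, limit_train_batches=1, limit_val_batches=1)
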
@@ -684,28 +684,6 @@ def test_sanity_metrics_are_reset(tmpdir):

    assert "val_loss" not in trainer.progress_bar_metrics


@RunIf(min_gpus=2)
@pytest.mark.parametrize("log_gpu_memory", ["all", "min_max"])
def test_log_gpu_memory_without_logging_on_step(tmpdir, log_gpu_memory):

    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=1,
        limit_val_batches=0,
        log_gpu_memory=log_gpu_memory,
        log_every_n_steps=1,
        gpus=[1],
    )
    trainer.fit(model)
    if log_gpu_memory == "min_max":
        assert "min_gpu_mem" in trainer.logged_metrics
        assert "max_gpu_mem" in trainer.logged_metrics
    else:
        assert "gpu_id: 1/memory.used (MB)" in trainer.logged_metrics


@RunIf(min_gpus=1)
def test_move_metrics_to_cpu(tmpdir):
    class TestModel(BoringModel):

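The hunk header (28 lines down to 6) indicates the whole `test_log_gpu_memory_without_logging_on_step` test is removed along with the flag it exercised. A rough sketch of a replacement smoke test built on `DeviceStatsMonitor`; the exact metric keys it emits depend on the accelerator and Lightning version, so nothing key-specific is asserted, and the `RunIf` import path is assumed (this test is illustrative, not part of the diff):

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks import DeviceStatsMonitor
    from tests.helpers import BoringModel
    from tests.helpers.runif import RunIf  # assumed location of the RunIf marker

    @RunIf(min_gpus=1)
    def test_device_stats_monitor_smoke(tmpdir):
        # Smoke test: the callback should run without error on a GPU machine;
        # the stats themselves are sent to the attached logger.
        model = BoringModel()
        trainer = Trainer(
            default_root_dir=tmpdir,
            max_epochs=1,
            limit_train_batches=1,
            limit_val_batches=0,
            callbacks=[DeviceStatsMonitor()],
            log_every_n_steps=1,
            gpus=1,
        )
        trainer.fit(model)
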
@@ -128,7 +128,6 @@ def test_add_argparse_args_redefined_error(cli_args: list, monkeypatch):

    # They should not be changed by the argparse interface.
    "min_steps": None,
    "max_steps": None,
    "log_gpu_memory": None,
    "accelerator": None,
    "weights_save_path": None,
    "resume_from_checkpoint": None,

@@ -134,7 +134,6 @@ def test_add_argparse_args_redefined_error(cli_args, monkeypatch):

    # interface.
    min_steps=None,
    max_steps=None,
    log_gpu_memory=None,
    distributed_backend=None,
    weights_save_path=None,
    resume_from_checkpoint=None,

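Both argparse hunks appear to drop the `log_gpu_memory` entry from the expected Trainer defaults used by `test_add_argparse_args_redefined_error`. A minimal sketch of the pattern those defaults feed into, assuming the standard `add_argparse_args`/`from_argparse_args` helpers on Trainer (the script below is illustrative, not part of the diff):

    from argparse import ArgumentParser

    from pytorch_lightning import Trainer

    # Expose every Trainer flag as a command-line argument with its default value.
    parser = ArgumentParser()
    parser = Trainer.add_argparse_args(parser)
    args = parser.parse_args([])  # empty argv keeps all defaults
    trainer = Trainer.from_argparse_args(args)
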