# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import pickle
from unittest import mock
from unittest.mock import ANY

import pytest
import torch

from pytorch_lightning import Callback, Trainer
from pytorch_lightning.demos.boring_classes import BoringModel
from pytorch_lightning.loggers import (
    CometLogger,
    CSVLogger,
    MLFlowLogger,
    NeptuneLogger,
    TensorBoardLogger,
    WandbLogger,
)
from pytorch_lightning.loggers.logger import DummyExperiment
from tests_pytorch.helpers.runif import RunIf
from tests_pytorch.loggers.test_comet import _patch_comet_atexit
from tests_pytorch.loggers.test_mlflow import mock_mlflow_run_creation
from tests_pytorch.loggers.test_neptune import create_neptune_mock
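
# Context managers that patch the third-party logger SDKs (comet_ml, mlflow,
# neptune, wandb) with mocks, so the loggers below can be exercised without the
# real packages, credentials, or network access.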
LOGGER_CTX_MANAGERS = (
    mock.patch("pytorch_lightning.loggers.comet.comet_ml"),
    mock.patch("pytorch_lightning.loggers.comet.CometOfflineExperiment"),
    mock.patch("pytorch_lightning.loggers.mlflow.mlflow"),
    mock.patch("pytorch_lightning.loggers.mlflow.MlflowClient"),
    mock.patch("pytorch_lightning.loggers.neptune.neptune", new_callable=create_neptune_mock),
    mock.patch("pytorch_lightning.loggers.neptune._NEPTUNE_AVAILABLE", return_value=True),
    mock.patch("pytorch_lightning.loggers.wandb.wandb"),
    mock.patch("pytorch_lightning.loggers.wandb.Run", new=mock.Mock),
)

ALL_LOGGER_CLASSES = (
    CometLogger,
    CSVLogger,
    MLFlowLogger,
    NeptuneLogger,
    TensorBoardLogger,
    WandbLogger,
)
ALL_LOGGER_CLASSES_WO_NEPTUNE = tuple(filter(lambda cls: cls is not NeptuneLogger, ALL_LOGGER_CLASSES))
ALL_LOGGER_CLASSES_WO_NEPTUNE_WANDB = tuple(filter(lambda cls: cls is not WandbLogger, ALL_LOGGER_CLASSES_WO_NEPTUNE))


def _get_logger_args(logger_class, save_dir):
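    """Return the minimal kwargs needed to construct ``logger_class`` offline under ``save_dir``.

    ``inspect.getfullargspec`` on a class reports its ``__init__`` parameters, so only
    arguments the constructor actually accepts are passed through.
    """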
    logger_args = {}
    if "save_dir" in inspect.getfullargspec(logger_class).args:
        logger_args.update(save_dir=str(save_dir))
    if "offline_mode" in inspect.getfullargspec(logger_class).args:
        logger_args.update(offline_mode=True)
    if "offline" in inspect.getfullargspec(logger_class).args:
        logger_args.update(offline=True)
    if issubclass(logger_class, NeptuneLogger):
        logger_args.update(mode="offline")
    return logger_args


def _instantiate_logger(logger_class, save_dir, **override_kwargs):
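    """Construct a logger with the offline defaults from ``_get_logger_args``, plus per-test overrides."""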
    args = _get_logger_args(logger_class, save_dir)
    args.update(**override_kwargs)
    logger = logger_class(**args)
    return logger


@pytest.mark.parametrize("logger_class", ALL_LOGGER_CLASSES)
def test_loggers_fit_test_all(tmpdir, monkeypatch, logger_class):
    """Verify the basic functionality of all loggers."""
    with contextlib.ExitStack() as stack:
        for mgr in LOGGER_CTX_MANAGERS:
            stack.enter_context(mgr)
        _test_loggers_fit_test(tmpdir, logger_class)


def _test_loggers_fit_test(tmpdir, logger_class):
    class CustomModel(BoringModel):
        def training_step(self, batch, batch_idx):
            output = self.layer(batch)
            loss = self.loss(batch, output)
            self.log("train_some_val", loss)
            return {"loss": loss}

        def validation_epoch_end(self, outputs) -> None:
            avg_val_loss = torch.stack([x["x"] for x in outputs]).mean()
            self.log_dict({"early_stop_on": avg_val_loss, "val_loss": avg_val_loss**0.5})

        def test_epoch_end(self, outputs) -> None:
            avg_test_loss = torch.stack([x["y"] for x in outputs]).mean()
            self.log("test_loss", avg_test_loss)
    class StoreHistoryLogger(logger_class):
        def __init__(self, *args, **kwargs) -> None:
            super().__init__(*args, **kwargs)
            self.history = []

        def log_metrics(self, metrics, step):
            super().log_metrics(metrics, step)
            self.history.append((step, metrics))

    logger_args = _get_logger_args(logger_class, tmpdir)
    logger = StoreHistoryLogger(**logger_args)

    if logger_class == WandbLogger:
        # required mocks for Trainer
        logger.experiment.id = "foo"
        logger.experiment.name = "bar"

    if logger_class == CometLogger:
        logger.experiment.id = "foo"
        logger.experiment.project_name = "bar"

    if logger_class == MLFlowLogger:
        logger = mock_mlflow_run_creation(logger, experiment_id="foo", run_id="bar")

    model = CustomModel()
    trainer = Trainer(
        max_epochs=1,
        logger=logger,
        limit_train_batches=1,
        limit_val_batches=1,
        log_every_n_steps=1,
        default_root_dir=tmpdir,
    )
    trainer.fit(model)
    trainer.test()

    log_metric_names = [(s, sorted(m.keys())) for s, m in logger.history]
    expected = [
        (0, ["epoch", "train_some_val"]),
        (0, ["early_stop_on", "epoch", "val_loss"]),
        (1, ["epoch", "test_loss"]),
    ]
    assert log_metric_names == expected


@pytest.mark.parametrize("logger_class", ALL_LOGGER_CLASSES_WO_NEPTUNE)  # NeptuneLogger is tested separately
def test_loggers_pickle_all(tmpdir, monkeypatch, logger_class):
    """Test that the logger objects can be pickled.

    This test only makes sense if the packages are installed.
    """
    _patch_comet_atexit(monkeypatch)
    try:
        _test_loggers_pickle(tmpdir, monkeypatch, logger_class)
    except (ImportError, ModuleNotFoundError):
        pytest.xfail(f"pickle test requires {logger_class.__name__} dependencies to be installed.")


def _test_loggers_pickle(tmpdir, monkeypatch, logger_class):
    """Verify that pickling a trainer with a logger works."""
    _patch_comet_atexit(monkeypatch)

    logger_args = _get_logger_args(logger_class, tmpdir)
    logger = logger_class(**logger_args)

    # this can cause pickle error if the experiment object is not picklable
    # the logger needs to remove it from the state before pickle
    _ = logger.experiment
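
    # (loggers typically do this with a ``__getstate__`` that drops the live
    # experiment handle, e.g. ``state.pop("_experiment", None)``; a sketch only,
    # the actual attribute name varies per logger)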

    # logger also has to avoid adding un-picklable attributes to self in .save
    logger.log_metrics({"a": 1})
    logger.save()

    # test pickling loggers
    pickle.dumps(logger)

    trainer = Trainer(max_epochs=1, logger=logger)
    pkl_bytes = pickle.dumps(trainer)

    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})

    # make sure we restored properly
    assert trainer2.logger.name == logger.name
    assert trainer2.logger.save_dir == logger.save_dir


@pytest.mark.parametrize(
    "extra_params",
    [
        pytest.param(dict(max_epochs=1, auto_scale_batch_size=True), id="Batch-size-Finder"),
        pytest.param(dict(max_epochs=3, auto_lr_find=True), id="LR-Finder"),
    ],
)
def test_logger_reset_correctly(tmpdir, extra_params):
    """Test that the tuners do not alter the logger reference."""

    class CustomModel(BoringModel):
        def __init__(self, lr=0.1, batch_size=1):
            super().__init__()
            self.save_hyperparameters()

    model = CustomModel()
    trainer = Trainer(default_root_dir=tmpdir, **extra_params)
    logger1 = trainer.logger
    trainer.tune(model)
    logger2 = trainer.logger
    logger3 = model.logger

    assert logger1 == logger2, "Finder altered the logger of trainer"
    assert logger2 == logger3, "Finder altered the logger of model"


class RankZeroLoggerCheck(Callback):
    # this class has to be defined outside the test function, otherwise we get pickle error
    # due to the way ddp process is launched

    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
        is_dummy = isinstance(trainer.logger.experiment, DummyExperiment)
        if trainer.is_global_zero:
            assert not is_dummy
        else:
            assert is_dummy
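            # DummyExperiment answers arbitrary attribute access with a no-op
            # that returns None, so any call must be safe on non-zero ranks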
            assert pl_module.logger.experiment.something(foo="bar") is None


@pytest.mark.parametrize("logger_class", ALL_LOGGER_CLASSES_WO_NEPTUNE_WANDB)
@RunIf(skip_windows=True)
def test_logger_created_on_rank_zero_only(tmpdir, monkeypatch, logger_class):
    """Test that loggers get replaced by dummy loggers on global rank > 0."""
    _patch_comet_atexit(monkeypatch)
    try:
        _test_logger_created_on_rank_zero_only(tmpdir, logger_class)
    except (ImportError, ModuleNotFoundError):
        pytest.xfail(f"multi-process test requires {logger_class.__name__} dependencies to be installed.")


def _test_logger_created_on_rank_zero_only(tmpdir, logger_class):
    logger_args = _get_logger_args(logger_class, tmpdir)
    logger = logger_class(**logger_args)
    model = BoringModel()
    trainer = Trainer(
        logger=logger,
        default_root_dir=tmpdir,
        strategy="ddp_spawn",
        accelerator="cpu",
        devices=2,
        max_steps=1,
        callbacks=[RankZeroLoggerCheck()],
    )
    trainer.fit(model)
    assert trainer.state.finished, f"Training failed with {trainer.state}"


def test_logger_with_prefix_all(tmpdir, monkeypatch):
    """Test that prefix is added at the beginning of the metric keys."""
    prefix = "tmp"

    # Comet
    with mock.patch("pytorch_lightning.loggers.comet.comet_ml"), mock.patch(
        "pytorch_lightning.loggers.comet.CometOfflineExperiment"
    ):
        _patch_comet_atexit(monkeypatch)
        logger = _instantiate_logger(CometLogger, save_dir=tmpdir, prefix=prefix)
        logger.log_metrics({"test": 1.0}, step=0)
        logger.experiment.log_metrics.assert_called_once_with({"tmp-test": 1.0}, epoch=None, step=0)

    # MLflow
    with mock.patch("pytorch_lightning.loggers.mlflow.mlflow"), mock.patch(
        "pytorch_lightning.loggers.mlflow.MlflowClient"
    ):
        logger = _instantiate_logger(MLFlowLogger, save_dir=tmpdir, prefix=prefix)
        logger.log_metrics({"test": 1.0}, step=0)
        logger.experiment.log_metric.assert_called_once_with(ANY, "tmp-test", 1.0, ANY, 0)

    # Neptune
    with mock.patch("pytorch_lightning.loggers.neptune.neptune"), mock.patch(
        "pytorch_lightning.loggers.neptune._NEPTUNE_AVAILABLE", return_value=True
    ):
        logger = _instantiate_logger(NeptuneLogger, api_key="test", project="project", save_dir=tmpdir, prefix=prefix)
        assert logger.experiment.__getitem__.call_count == 2
        logger.log_metrics({"test": 1.0}, step=0)
        assert logger.experiment.__getitem__.call_count == 3
        logger.experiment.__getitem__.assert_called_with("tmp/test")
        logger.experiment.__getitem__().log.assert_called_once_with(1.0)

    # TensorBoard
    with mock.patch("pytorch_lightning.loggers.tensorboard.SummaryWriter"):
        logger = _instantiate_logger(TensorBoardLogger, save_dir=tmpdir, prefix=prefix)
        logger.log_metrics({"test": 1.0}, step=0)
        logger.experiment.add_scalar.assert_called_once_with("tmp-test", 1.0, 0)

    # WandB
    with mock.patch("pytorch_lightning.loggers.wandb.wandb") as wandb, mock.patch(
        "pytorch_lightning.loggers.wandb.Run", new=mock.Mock
    ):
        logger = _instantiate_logger(WandbLogger, save_dir=tmpdir, prefix=prefix)
        wandb.run = None
        wandb.init().step = 0
        logger.log_metrics({"test": 1.0}, step=0)
        logger.experiment.log.assert_called_once_with({"tmp-test": 1.0, "trainer/global_step": 0})


def test_logger_default_name(tmpdir):
    """Test that the default logger name is lightning_logs."""
    # CSV
    logger = CSVLogger(save_dir=tmpdir)
    assert logger.name == "lightning_logs"

    # TensorBoard
    with mock.patch("pytorch_lightning.loggers.tensorboard.SummaryWriter"):
        logger = _instantiate_logger(TensorBoardLogger, save_dir=tmpdir)
    assert logger.name == "lightning_logs"

    # MLflow
    with mock.patch("pytorch_lightning.loggers.mlflow.mlflow"), mock.patch(
        "pytorch_lightning.loggers.mlflow.MlflowClient"
    ) as mlflow_client:
        mlflow_client().get_experiment_by_name.return_value = None
        logger = _instantiate_logger(MLFlowLogger, save_dir=tmpdir)

        _ = logger.experiment
        logger._mlflow_client.create_experiment.assert_called_with(name="lightning_logs", artifact_location=ANY)
        # on MLFlowLogger `name` refers to the experiment id
        # assert logger.experiment.get_experiment(logger.name).name == "lightning_logs"