# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import Namespace
from distutils.version import LooseVersion
from unittest import mock

import pytest
import torch
import yaml
from omegaconf import OmegaConf
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from tests.helpers import BoringModel


@pytest.mark.skipif(
    LooseVersion(torch.__version__) < LooseVersion("1.5.0"),
    reason="Minimal PT version is set to 1.5",
)
def test_tensorboard_hparams_reload(tmpdir):

    class CustomModel(BoringModel):

        def __init__(self, b1=0.5, b2=0.999):
            super().__init__()
            self.save_hyperparameters()

    trainer = Trainer(max_steps=1, default_root_dir=tmpdir)
    model = CustomModel()
    assert trainer.log_dir == trainer.logger.log_dir
    trainer.fit(model)

    assert trainer.log_dir == trainer.logger.log_dir
    folder_path = trainer.log_dir

    # make sure yaml is there
    with open(os.path.join(folder_path, "hparams.yaml")) as file:
        # safe_load converts the YAML scalars into a Python dict
        yaml_params = yaml.safe_load(file)
        assert yaml_params["b1"] == 0.5
        assert yaml_params["b2"] == 0.999
        assert len(yaml_params.keys()) == 2

    # verify artifacts
    assert len(os.listdir(os.path.join(folder_path, "checkpoints"))) == 1

    # verify tb logs
    event_acc = EventAccumulator(folder_path)
    event_acc.Reload()
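
    # Raw plugin_data payloads expected in the event file: serialized
    # hparams-plugin protobufs; the byte layout changed between torch 1.5
    # and 1.6, hence the version switch below.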
    data_pt_1_5 = b'\x12\x1b"\x04\n\x02b1"\x04\n\x02b2*\r\n\x0b\x12\thp_metric'
    data_pt_1_6 = b'\x12\x1f"\x06\n\x02b1 \x03"\x06\n\x02b2 \x03*\r\n\x0b\x12\thp_metric'
    hparams_data = data_pt_1_6 if LooseVersion(torch.__version__) >= LooseVersion("1.6.0") else data_pt_1_5

    assert event_acc.summary_metadata['_hparams_/experiment'].plugin_data.plugin_name == 'hparams'
    assert event_acc.summary_metadata['_hparams_/experiment'].plugin_data.content == hparams_data


def test_tensorboard_automatic_versioning(tmpdir):
    """Verify that automatic versioning works"""
    root_dir = tmpdir / "tb_versioning"
    root_dir.mkdir()
    (root_dir / "version_0").mkdir()
    (root_dir / "version_1").mkdir()
    logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning")
    assert logger.version == 2


def test_tensorboard_manual_versioning(tmpdir):
    """Verify that manual versioning works"""
    root_dir = tmpdir / "tb_versioning"
    root_dir.mkdir()
    (root_dir / "version_0").mkdir()
    (root_dir / "version_1").mkdir()
    (root_dir / "version_2").mkdir()

    logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=1)

    assert logger.version == 1


def test_tensorboard_named_version(tmpdir):
    """Verify that manual versioning works for string versions, e.g. '2020-02-05-162402' """
    name = "tb_versioning"
    (tmpdir / name).mkdir()
    expected_version = "2020-02-05-162402"

    logger = TensorBoardLogger(save_dir=tmpdir, name=name, version=expected_version)
    logger.log_hyperparams({"a": 1, "b": 2, 123: 3, 3.5: 4, 5j: 5})  # Force data to be written

    assert logger.version == expected_version
    assert os.listdir(tmpdir / name) == [expected_version]
    assert os.listdir(tmpdir / name / expected_version)


@pytest.mark.parametrize("name", ["", None])
def test_tensorboard_no_name(tmpdir, name):
    """Verify that None or empty name works"""
    logger = TensorBoardLogger(save_dir=tmpdir, name=name)
    logger.log_hyperparams({"a": 1, "b": 2, 123: 3, 3.5: 4, 5j: 5})  # Force data to be written
    assert logger.root_dir == tmpdir
    assert os.listdir(tmpdir / "version_0")


@pytest.mark.parametrize("step_idx", [10, None])
def test_tensorboard_log_metrics(tmpdir, step_idx):
    logger = TensorBoardLogger(tmpdir)
    metrics = {
        "float": 0.3,
        "int": 1,
        "FloatTensor": torch.tensor(0.1),
        "IntTensor": torch.tensor(1),
    }
    logger.log_metrics(metrics, step_idx)


def test_tensorboard_log_hyperparams(tmpdir):
    logger = TensorBoardLogger(tmpdir)
    hparams = {
        "float": 0.3,
        "int": 1,
        "string": "abc",
        "bool": True,
        "dict": {
            "a": {
                "b": "c"
            }
        },
        "list": [1, 2, 3],
        "namespace": Namespace(foo=Namespace(bar="buzz")),
        "layer": torch.nn.BatchNorm1d,
    }
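    # the mixed value types above exercise the logger's hparam handling:
    # nested dicts and Namespaces are flattened, and unsupported types are
    # converted to strings before being written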
    logger.log_hyperparams(hparams)


def test_tensorboard_log_hparams_and_metrics(tmpdir):
    logger = TensorBoardLogger(tmpdir, default_hp_metric=False)
    hparams = {
        "float": 0.3,
        "int": 1,
        "string": "abc",
        "bool": True,
        "dict": {
            "a": {
                "b": "c"
            }
        },
        "list": [1, 2, 3],
        "namespace": Namespace(foo=Namespace(bar="buzz")),
        "layer": torch.nn.BatchNorm1d,
    }
    metrics = {"abc": torch.tensor([0.54])}
    logger.log_hyperparams(hparams, metrics)


def test_tensorboard_log_omegaconf_hparams_and_metrics(tmpdir):
    logger = TensorBoardLogger(tmpdir, default_hp_metric=False)
    hparams = {
        "float": 0.3,
        "int": 1,
        "string": "abc",
        "bool": True,
        "dict": {
            "a": {
                "b": "c"
            }
        },
        "list": [1, 2, 3],
        # "namespace": Namespace(foo=Namespace(bar="buzz")),
        # "layer": torch.nn.BatchNorm1d,
    }
    hparams = OmegaConf.create(hparams)
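    # OmegaConf containers only hold primitive and structured values, which is
    # presumably why the Namespace and layer entries are left commented out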

    metrics = {"abc": torch.tensor([0.54])}
    logger.log_hyperparams(hparams, metrics)


@pytest.mark.parametrize("example_input_array", [None, torch.rand(2, 32)])
def test_tensorboard_log_graph(tmpdir, example_input_array):
    """Test that log_graph works both with model.example_input_array and
    with an array that is passed externally.
    """
    model = BoringModel()
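    # when an input array is passed explicitly, clear the model attribute so
    # that log_graph has to use the externally supplied array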
    if example_input_array is not None:
        model.example_input_array = None

    logger = TensorBoardLogger(tmpdir, log_graph=True)
    logger.log_graph(model, example_input_array)


def test_tensorboard_log_graph_warning_no_example_input_array(tmpdir):
    """Test that log_graph raises a warning if model.example_input_array is None."""
    model = BoringModel()
    model.example_input_array = None
    logger = TensorBoardLogger(tmpdir, log_graph=True)
    with pytest.warns(
        UserWarning,
        match='Could not log computational graph since the `model.example_input_array`'
        ' attribute is not set or `input_array` was not given'
    ):
        logger.log_graph(model)


@mock.patch('pytorch_lightning.loggers.TensorBoardLogger.log_metrics')
@pytest.mark.parametrize('expected', [
    [5, 11, 17],
])
def test_tensorboard_with_accumulated_gradients(mock_log_metrics, expected, tmpdir):
    """
    Tests to ensure that TensorBoard logs properly when accumulate_grad_batches > 1
    """

    class TestModel(BoringModel):

        def __init__(self):
            super().__init__()
            self._count = 0
            self._indexes = []

        def training_step(self, batch, batch_idx):
            output = self.layer(batch)
            loss = self.loss(batch, output)
            self.log('count', self._count, on_step=True, on_epoch=True)
            self.log('loss', loss, on_step=True, on_epoch=True)
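
            # record the global steps at which an actual logging call should
            # happen: only non-accumulation steps that hit the
            # log_every_n_steps interval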
            if not self.trainer.train_loop.should_accumulate():
                if self.trainer.logger_connector.should_update_logs:
                    self._indexes.append(self.trainer.global_step)

            return loss

        def validation_step(self, batch, batch_idx):
            output = self.layer(batch)
            loss = self.loss(batch, output)
            self.log('val_loss', loss, on_step=True, on_epoch=True)
            return loss

        def configure_optimizers(self):
            optimizer = torch.optim.SGD(self.layer.parameters(), lr=.001)
            lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
            return [optimizer], [lr_scheduler]

    model = TestModel()
    model.training_epoch_end = None
    model.validation_epoch_end = None

    logger_0 = TensorBoardLogger(tmpdir, default_hp_metric=False)

    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=12,
        limit_val_batches=0,
        max_epochs=3,
        gpus=0,
        accumulate_grad_batches=2,
        logger=[logger_0],
        log_every_n_steps=3,
    )
    trainer.fit(model)
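
    # mock_calls entries are (name, args, kwargs) triples; log_metrics is
    # invoked with `metrics` and `step` keyword arguments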
    mock_count_epochs = [m[2]["step"] for m in mock_log_metrics.mock_calls if "count_epoch" in m[2]["metrics"]]
    assert mock_count_epochs == expected

    mock_count_steps = [m[2]["step"] for m in mock_log_metrics.mock_calls if "count_step" in m[2]["metrics"]]
    assert model._indexes == mock_count_steps


@mock.patch('pytorch_lightning.loggers.tensorboard.SummaryWriter')
def test_tensorboard_finalize(summary_writer, tmpdir):
    """ Test that the SummaryWriter closes in finalize. """
    logger = TensorBoardLogger(save_dir=tmpdir)
    logger.finalize("any")
    summary_writer().flush.assert_called()
    summary_writer().close.assert_called()