diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index d99c26a5de..088b0fb71e 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -1936,7 +1936,7 @@ class Trainer(
         if self.loggers and self.num_training_batches < self.log_every_n_steps:
             rank_zero_warn(
-                f"The number of training samples ({self.num_training_batches}) is smaller than the logging interval"
+                f"The number of training batches ({self.num_training_batches}) is smaller than the logging interval"
                 f" Trainer(log_every_n_steps={self.log_every_n_steps}). Set a lower value for log_every_n_steps if"
                 " you want to see logs for the training epoch.",
                 category=PossibleUserWarning,
diff --git a/tests/trainer/test_dataloaders.py b/tests/trainer/test_dataloaders.py
index 08d54e05bf..66b5be243b 100644
--- a/tests/trainer/test_dataloaders.py
+++ b/tests/trainer/test_dataloaders.py
@@ -702,11 +702,11 @@ def test_warning_with_small_dataloader_and_logging_interval(tmpdir):
     dataloader = DataLoader(RandomDataset(32, length=10))
     model.train_dataloader = lambda: dataloader

-    with pytest.warns(UserWarning, match=r"The number of training samples \(10\) is smaller than the logging interval"):
+    with pytest.warns(UserWarning, match=r"The number of training batches \(10\) is smaller than the logging interval"):
         trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, log_every_n_steps=11)
         trainer.fit(model)

-    with pytest.warns(UserWarning, match=r"The number of training samples \(1\) is smaller than the logging interval"):
+    with pytest.warns(UserWarning, match=r"The number of training batches \(1\) is smaller than the logging interval"):
         trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, log_every_n_steps=2, limit_train_batches=1)
         trainer.fit(model)