diff --git a/pytorch_lightning/logging/mlflow_logger.py b/pytorch_lightning/logging/mlflow_logger.py
index 70609ea334..970dfe0309 100644
--- a/pytorch_lightning/logging/mlflow_logger.py
+++ b/pytorch_lightning/logging/mlflow_logger.py
@@ -53,4 +53,7 @@ class MLFlowLogger(LightningLoggerBase):
 
     @rank_zero_only
     def finalize(self, status="FINISHED"):
+        # map the trainer's generic 'success' status to MLflow's 'FINISHED'
+        if status == 'success':
+            status = 'FINISHED'
         self.client.set_terminated(self.run_id, status)
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index 352a4283b0..dd82739feb 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -1072,6 +1072,10 @@ class Trainer(TrainerIO):
             if stop:
                 return
 
+        # training finished normally; give the logger a chance to close its run
+        if self.logger is not None:
+            self.logger.finalize("success")
+
     def run_training_epoch(self):
         # before epoch hook
         if self.__is_function_implemented('on_epoch_start'):
diff --git a/tests/test_logging.py b/tests/test_logging.py
index b50f87f240..b8436a9d0f 100644
--- a/tests/test_logging.py
+++ b/tests/test_logging.py
@@ -7,6 +7,7 @@ import torch
 
 from pytorch_lightning import Trainer
 from pytorch_lightning.testing import LightningTestModel
+from pytorch_lightning.logging import LightningLoggerBase, rank_zero_only
 from .test_models import get_hparams, get_test_tube_logger, init_save_dir, clear_save_dir
 
 RANDOM_SEEDS = list(np.random.randint(0, 10000, 1000))
@@ -134,6 +135,47 @@ def test_mlflow_pickle():
     trainer2.logger.log_metrics({"acc": 1.0})
 
 
+def test_custom_logger():
+    """Verify that a user-defined logger receives hparams, metrics and the final status."""
+
+    class CustomLogger(LightningLoggerBase):
+        def __init__(self):
+            super().__init__()
+            self.hparams_logged = None
+            self.metrics_logged = None
+            self.finalized_status = ""
+
+        @rank_zero_only
+        def log_hyperparams(self, params):
+            self.hparams_logged = params
+
+        @rank_zero_only
+        def log_metrics(self, metrics, step_num):
+            self.metrics_logged = metrics
+
+        @rank_zero_only
+        def finalize(self, status):
+            self.finalized_status = status
+
+    hparams = get_hparams()
+    model = LightningTestModel(hparams)
+
+    logger = CustomLogger()
+
+    trainer_options = dict(
+        max_nb_epochs=1,
+        train_percent_check=0.01,
+        logger=logger
+    )
+
+    trainer = Trainer(**trainer_options)
+    result = trainer.fit(model)
+    assert result == 1, "Training failed"
+    assert logger.hparams_logged == hparams
+    assert logger.metrics_logged is not None
+    assert logger.finalized_status == "success"
+
+
 def reset_seed():
     SEED = RANDOM_SEEDS.pop()
     torch.manual_seed(SEED)