diff --git a/CHANGELOG.md b/CHANGELOG.md
index e65efa50cb..5a60281aa0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Changed
 
+- Truncated long version numbers in progress bar ([#2594](https://github.com/PyTorchLightning/pytorch-lightning/pull/2594))
 
 ### Deprecated
 
diff --git a/docs/source/experiment_reporting.rst b/docs/source/experiment_reporting.rst
index c0c2c8a1b9..46ecf67747 100644
--- a/docs/source/experiment_reporting.rst
+++ b/docs/source/experiment_reporting.rst
@@ -30,14 +30,14 @@ Control log writing frequency
 Writing to a logger can be expensive. In Lightning you can set the interval at which you want
 to log using this trainer flag.
 
-.. seealso::
-    :class:`~pytorch_lightning.trainer.trainer.Trainer`
-
 .. testcode::
 
     k = 100
     trainer = Trainer(log_save_interval=k)
 
+.. seealso::
+    :class:`~pytorch_lightning.trainer.trainer.Trainer`
+
 ----------
 
 Log metrics
@@ -94,10 +94,14 @@ For instance, here we log images using tensorboard.
 Modify progress bar
 ^^^^^^^^^^^^^^^^^^^
 
-Each return dict from the training_end, validation_end, testing_end and training_step also has
-a key called "progress_bar".
+Each return dict from the
+:meth:`~pytorch_lightning.core.lightning.LightningModule.training_step`,
+:meth:`~pytorch_lightning.core.lightning.LightningModule.training_epoch_end`,
+:meth:`~pytorch_lightning.core.lightning.LightningModule.validation_epoch_end` and
+:meth:`~pytorch_lightning.core.lightning.LightningModule.test_epoch_end`
+can also contain a key called `progress_bar`.
 
-Here we show the validation loss in the progress bar
+Here we show the validation loss in the progress bar:
 
 .. testcode::
 
@@ -109,6 +113,10 @@ Here we show the validation loss in the progress bar
         results = {'progress_bar': logs}
         return results
 
+The progress bar by default already includes the training loss and the version number of the experiment
+if you are using a logger. These defaults can be customized by overriding the
+:meth:`~pytorch_lightning.core.lightning.LightningModule.get_progress_bar_dict` hook in your module.
+
 ----------
 
diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py
index c753ce2fcd..afb9fa0a92 100644
--- a/pytorch_lightning/core/lightning.py
+++ b/pytorch_lightning/core/lightning.py
@@ -1544,7 +1544,6 @@ class LightningModule(ABC, DeviceDtypeModuleMixin, GradInformation, ModelIO, Mod
         Example:
             .. code-block:: python
 
-
                 def on_save_checkpoint(self, checkpoint):
                     # 99% of use cases you don't need to implement this method
                     checkpoint['something_cool_i_want_to_save'] = my_cool_pickable_object
@@ -1558,7 +1557,23 @@ class LightningModule(ABC, DeviceDtypeModuleMixin, GradInformation, ModelIO, Mod
 
     def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:
         r"""
-        Additional items to be displayed in the progress bar.
+        Implement this to override the default items displayed in the progress bar.
+        By default it includes the average loss value, the split index of BPTT (if used)
+        and the version of the experiment when using a logger.
+
+        .. code-block::
+
+            Epoch 1:   4%|▎         | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10]
+
+        Here is an example of how to override the defaults:
+
+        .. code-block:: python
+
+            def get_progress_bar_dict(self):
+                # don't show the version number
+                items = super().get_progress_bar_dict()
+                items.pop("v_num", None)
+                return items
 
         Return:
             Dictionary with the items to be displayed in the progress bar.
@@ -1572,7 +1587,10 @@ class LightningModule(ABC, DeviceDtypeModuleMixin, GradInformation, ModelIO, Mod
             tqdm_dict['split_idx'] = self.trainer.split_idx
 
         if self.trainer.logger is not None and self.trainer.logger.version is not None:
-            tqdm_dict['v_num'] = self.trainer.logger.version
+            version = self.trainer.logger.version
+            # show only the last 4 characters of long version strings
+            version = version[-4:] if isinstance(version, str) else version
+            tqdm_dict['v_num'] = version
 
         return tqdm_dict
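
As a quick illustration of the behavior this diff introduces, here is a minimal sketch; the `LitModel` name and the standalone truncation check are illustrative assumptions, not part of the PR:

    import pytorch_lightning as pl

    class LitModel(pl.LightningModule):
        def get_progress_bar_dict(self):
            # start from the defaults: loss, v_num, and split_idx when BPTT is used
            items = super().get_progress_bar_dict()
            # drop the experiment version from the progress bar entirely
            items.pop("v_num", None)
            return items

    # the truncation logic from this diff, in isolation (not part of the PR):
    version = "d4e5f6a7b8c9"   # e.g. a long hash-style version string
    version = version[-4:] if isinstance(version, str) else version
    assert version == "b8c9"   # integer versions pass through unchanged

Overriding the hook, as above, removes the key before the progress bar is rendered, whereas the diff itself only shortens string versions so they do not crowd out the other metrics.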