diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6399a119ea..7c19a41a64 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -198,6 +198,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - `LightningCLI` now aborts with a clearer message if config already exists and disables save config during `fast_dev_run`([#7963](https://github.com/PyTorchLightning/pytorch-lightning/pull/7963))
 
 
+- Improved `PyTorchProfiler` Chrome trace names ([#8009](https://github.com/PyTorchLightning/pytorch-lightning/pull/8009))
+
+
 - `Trainer(resume_from_checkpoint=...)` now restores the model directly after `LightningModule.setup()`, which is before `LightningModule.configure_sharded_model()` ([#7652](https://github.com/PyTorchLightning/pytorch-lightning/pull/7652))
 
 
diff --git a/pytorch_lightning/profiler/base.py b/pytorch_lightning/profiler/base.py
index d327d34e8d..8b5bf5483d 100644
--- a/pytorch_lightning/profiler/base.py
+++ b/pytorch_lightning/profiler/base.py
@@ -112,14 +112,19 @@ class BaseProfiler(AbstractProfiler):
         if self._local_rank in (None, 0):
             log.info(*args, **kwargs)
 
-    def _prepare_filename(self, extension: str = ".txt") -> str:
-        filename = ""
+    def _prepare_filename(
+        self, action_name: Optional[str] = None, extension: str = ".txt", split_token: str = "-"
+    ) -> str:
+        args = []
         if self._stage is not None:
-            filename += f"{self._stage}-"
-        filename += str(self.filename)
+            args.append(self._stage)
+        if self.filename:
+            args.append(self.filename)
         if self._local_rank is not None:
-            filename += f"-{self._local_rank}"
-        filename += extension
+            args.append(str(self._local_rank))
+        if action_name is not None:
+            args.append(action_name)
+        filename = split_token.join(args) + extension
         return filename
 
     def _prepare_streams(self) -> None:
diff --git a/pytorch_lightning/profiler/pytorch.py b/pytorch_lightning/profiler/pytorch.py
index e04f880063..0c0bde515a 100644
--- a/pytorch_lightning/profiler/pytorch.py
+++ b/pytorch_lightning/profiler/pytorch.py
@@ -427,11 +427,15 @@ class PyTorchProfiler(BaseProfiler):
         def on_trace_ready(profiler):
             if self.dirpath is not None:
                 if self._export_to_chrome:
-                    handler = tensorboard_trace_handler(self.dirpath, self._prepare_filename(extension=""))
+                    handler = tensorboard_trace_handler(
+                        self.dirpath, self._prepare_filename(action_name=action_name, extension="")
+                    )
                     handler(profiler)
 
                 if self._export_to_flame_graph:
-                    path = os.path.join(self.dirpath, self._prepare_filename(extension=".stack"))
+                    path = os.path.join(
+                        self.dirpath, self._prepare_filename(action_name=action_name, extension=".stack")
+                    )
                     profiler.export_stacks(path, metric=self._metric)
             else:
                 rank_zero_warn("The PyTorchProfiler failed to export trace as `dirpath` is None")
diff --git a/tests/test_profiler.py b/tests/test_profiler.py
index acc2bac1c4..d940d4426b 100644
--- a/tests/test_profiler.py
+++ b/tests/test_profiler.py
@@ -331,8 +331,8 @@ def test_pytorch_profiler_trainer_ddp(tmpdir, pytorch_profiler):
     files = [file for file in files if file.endswith('.json')]
     assert len(files) == 2, files
     local_rank = trainer.local_rank
-    assert any(f'training_step_{local_rank}' in f for f in files)
-    assert any(f'validation_step_{local_rank}' in f for f in files)
+    assert any(f'{local_rank}-training_step_and_backward' in f for f in files)
+    assert any(f'{local_rank}-validation_step' in f for f in files)
 
 
 def test_pytorch_profiler_trainer_test(tmpdir):
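
For context, here is a minimal, self-contained sketch of the naming scheme the refactored `_prepare_filename` produces. It is not part of the patch: the standalone `prepare_filename` helper and the sample `stage`/`filename`/`local_rank`/`action_name` values are hypothetical stand-ins for the profiler's internal state.

```python
from typing import Optional


def prepare_filename(
    stage: Optional[str],
    filename: Optional[str],
    local_rank: Optional[int],
    action_name: Optional[str] = None,
    extension: str = ".txt",
    split_token: str = "-",
) -> str:
    # Collect only the components that are set, then join them with the
    # split token -- mirroring how the patched method builds trace names.
    parts = []
    if stage is not None:
        parts.append(stage)
    if filename:
        parts.append(filename)
    if local_rank is not None:
        parts.append(str(local_rank))
    if action_name is not None:
        parts.append(action_name)
    return split_token.join(parts) + extension


# With the action name included, each rank's export gets a distinct,
# self-describing file name:
print(prepare_filename("fit", "profile", 0, "training_step_and_backward", extension=""))
# -> fit-profile-0-training_step_and_backward
print(prepare_filename("fit", "profile", 1, "validation_step", extension=".stack"))
# -> fit-profile-1-validation_step.stack
```

This ordering is also why the updated tests match `f'{local_rank}-training_step_and_backward'` rather than `f'training_step_{local_rank}'`: the rank is joined into the name immediately before the action name, so each rank produces a separate, unambiguous Chrome trace per profiled action.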