From 0e1e14f8158e75e2b9b9b78022645932fded5a12 Mon Sep 17 00:00:00 2001
From: Jirka Borovec <6035284+Borda@users.noreply.github.com>
Date: Mon, 21 Oct 2024 15:47:30 +0200
Subject: [PATCH] docs: fix removed ref to `deepspeed.initialize` (#20353)

* docs: fix removed ref to `deepspeed.initialize`

* fix links

---
 docs/source-pytorch/accelerators/tpu_advanced.rst | 4 ++--
 docs/source-pytorch/accelerators/tpu_basic.rst    | 4 ++--
 docs/source-pytorch/accelerators/tpu_faq.rst      | 2 +-
 src/lightning/fabric/strategies/deepspeed.py      | 2 +-
 src/lightning/fabric/strategies/xla_fsdp.py       | 2 +-
 src/lightning/pytorch/strategies/deepspeed.py     | 2 +-
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/source-pytorch/accelerators/tpu_advanced.rst b/docs/source-pytorch/accelerators/tpu_advanced.rst
index e410c6e825..d74f9b0737 100644
--- a/docs/source-pytorch/accelerators/tpu_advanced.rst
+++ b/docs/source-pytorch/accelerators/tpu_advanced.rst
@@ -52,7 +52,7 @@ Example:
     model = WeightSharingModule()
     trainer = Trainer(max_epochs=1, accelerator="tpu")

-See `XLA Documentation `_
+See `XLA Documentation `_

 ----

@@ -61,4 +61,4 @@ XLA
 XLA is the library that interfaces PyTorch with the TPUs.
 For more information check out `XLA `_.

-Guide for `troubleshooting XLA `_
+Guide for `troubleshooting XLA `_
diff --git a/docs/source-pytorch/accelerators/tpu_basic.rst b/docs/source-pytorch/accelerators/tpu_basic.rst
index fb4e2b7bde..217b76106a 100644
--- a/docs/source-pytorch/accelerators/tpu_basic.rst
+++ b/docs/source-pytorch/accelerators/tpu_basic.rst
@@ -108,7 +108,7 @@ There are cases in which training on TPUs is slower when compared with GPUs, for
 - XLA Graph compilation during the initial steps `Reference `_
 - Some tensor ops are not fully supported on TPU, or not supported at all. These operations will be performed on CPU (context switch).

-The official PyTorch XLA `performance guide `_
+The official PyTorch XLA `performance guide `_
 has more detailed information on how PyTorch code can be optimized for TPU. In particular, the
-`metrics report `_ allows
+`metrics report `_ allows
 one to identify operations that lead to context switching.
diff --git a/docs/source-pytorch/accelerators/tpu_faq.rst b/docs/source-pytorch/accelerators/tpu_faq.rst
index f4b2c60633..109449ef2c 100644
--- a/docs/source-pytorch/accelerators/tpu_faq.rst
+++ b/docs/source-pytorch/accelerators/tpu_faq.rst
@@ -78,7 +78,7 @@ A lot of PyTorch operations aren't lowered to XLA, which could lead to significa
 These operations are moved to the CPU memory and evaluated, and then the results are transferred back to the XLA device(s).
 By using the `xla_debug` Strategy, users could create a metrics report to diagnose issues.

-The report includes things like (`XLA Reference `_):
+The report includes things like (`XLA Reference `_):

 * how many times we issue XLA compilations and time spent on issuing.
 * how many times we execute and time spent on execution
diff --git a/src/lightning/fabric/strategies/deepspeed.py b/src/lightning/fabric/strategies/deepspeed.py
index 93a17f10c8..e71b8e2db3 100644
--- a/src/lightning/fabric/strategies/deepspeed.py
+++ b/src/lightning/fabric/strategies/deepspeed.py
@@ -598,7 +598,7 @@ class DeepSpeedStrategy(DDPStrategy, _Sharded):
     ) -> Tuple["DeepSpeedEngine", Optimizer]:
         """Initialize one model and one optimizer with an optional learning rate scheduler.

-        This calls :func:`deepspeed.initialize` internally.
+        This calls ``deepspeed.initialize`` internally.

         """
         import deepspeed
diff --git a/src/lightning/fabric/strategies/xla_fsdp.py b/src/lightning/fabric/strategies/xla_fsdp.py
index 6da693bafb..e4c080d811 100644
--- a/src/lightning/fabric/strategies/xla_fsdp.py
+++ b/src/lightning/fabric/strategies/xla_fsdp.py
@@ -56,7 +56,7 @@ class XLAFSDPStrategy(ParallelStrategy, _Sharded):

     .. warning:: This is an :ref:`experimental ` feature.

-    For more information check out https://github.com/pytorch/xla/blob/master/docs/fsdp.md
+    For more information check out https://github.com/pytorch/xla/blob/v2.5.0/docs/fsdp.md

     Args:
         auto_wrap_policy: Same as ``auto_wrap_policy`` parameter in
diff --git a/src/lightning/pytorch/strategies/deepspeed.py b/src/lightning/pytorch/strategies/deepspeed.py
index 382f807089..1eaa5bab75 100644
--- a/src/lightning/pytorch/strategies/deepspeed.py
+++ b/src/lightning/pytorch/strategies/deepspeed.py
@@ -414,7 +414,7 @@ class DeepSpeedStrategy(DDPStrategy):
     ) -> Tuple["deepspeed.DeepSpeedEngine", Optimizer]:
         """Initialize one model and one optimizer with an optional learning rate scheduler.

-        This calls :func:`deepspeed.initialize` internally.
+        This calls ``deepspeed.initialize`` internally.

         """
         import deepspeed