From 7b3de1215f815791beceb317e590d73718b65cef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20Mochol=C3=AD?=
Date: Tue, 25 Oct 2022 15:23:26 +0200
Subject: [PATCH] Remove examples and loggers from develop dependencies
 (#15282)

* Remove examples and loggers from develop dependencies

* remove more references

* Fix mypy

* Keep logger file for docs mocking

* Simpler fix

* Fix docs build

* Global testsetup

* Matching files

* Undo change

* loggers as info

* Clarify

* Update requirements/pytorch/loggers.info

Co-authored-by: Jirka
Co-authored-by: otaj <6065855+otaj@users.noreply.github.com>
---
 .actions/assistant.py                          |  1 -
 .azure/gpu-tests-lite.yml                      |  3 +--
 .azure/gpu-tests-pytorch.yml                   |  1 +
 .azure/ipu-tests.yml                           |  2 +-
 .github/workflows/docs-checks.yml              | 10 ++++------
 docs/source-lit/conf.py                        |  6 +++++-
 .../common/precision_intermediate.rst          |  2 +-
 docs/source-pytorch/common/trainer.rst         |  2 +-
 docs/source-pytorch/conf.py                    |  7 ++++---
 docs/source-pytorch/extensions/logging.rst     |  2 ++
 docs/source-pytorch/starter/introduction.rst   |  1 +
 .../visualize/supported_exp_managers.rst       | 15 ++++++++++-----
 requirements/README.md                         |  2 +-
 requirements/pytorch/devel.txt                 |  6 ------
 requirements/pytorch/loggers.info              |  5 +++++
 requirements/pytorch/loggers.txt               |  8 --------
 src/pytorch_lightning/__setup__.py             |  3 +--
 src/pytorch_lightning/loggers/neptune.py       |  1 +
 src/pytorch_lightning/loggers/wandb.py         |  2 +-
 .../trainer/connectors/checkpoint_connector.py |  2 +-
 20 files changed, 41 insertions(+), 40 deletions(-)
 create mode 100644 requirements/pytorch/loggers.info
 delete mode 100644 requirements/pytorch/loggers.txt

diff --git a/.actions/assistant.py b/.actions/assistant.py
index bccc890afa..a84c176f87 100644
--- a/.actions/assistant.py
+++ b/.actions/assistant.py
@@ -22,7 +22,6 @@ REQUIREMENT_FILES = {
     "pytorch": (
         "requirements/pytorch/base.txt",
         "requirements/pytorch/extra.txt",
-        "requirements/pytorch/loggers.txt",
         "requirements/pytorch/strategies.txt",
         "requirements/pytorch/examples.txt",
     ),
diff --git a/.azure/gpu-tests-lite.yml b/.azure/gpu-tests-lite.yml
index 16996202c2..7ecb16a4b0 100644
--- a/.azure/gpu-tests-lite.yml
+++ b/.azure/gpu-tests-lite.yml
@@ -76,8 +76,7 @@ jobs:
       python ./requirements/pytorch/adjust-versions.py requirements/lite/base.txt ${PYTORCH_VERSION}

       CUDA_VERSION_MM=$(python -c "import torch ; print(''.join(map(str, torch.version.cuda.split('.')[:2])))")
-      pip install -e .[strategies] --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html
-      pip install --requirement requirements/pytorch/devel.txt --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html
+      pip install -e .[dev,strategies] --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html

       pip list
     env:
diff --git a/.azure/gpu-tests-pytorch.yml b/.azure/gpu-tests-pytorch.yml
index d5c1ca28ef..a79ce2de2a 100644
--- a/.azure/gpu-tests-pytorch.yml
+++ b/.azure/gpu-tests-pytorch.yml
@@ -110,6 +110,7 @@ jobs:
       CUDA_VERSION_MM=$(python -c "import torch ; print(''.join(map(str, torch.version.cuda.split('.')[:2])))")
       pip install -e .[strategies] --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html
       pip install --requirement requirements/pytorch/devel.txt --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html
+      pip install --requirement requirements/pytorch/examples.txt --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html

       CUDA_VERSION_BAGUA=$(python -c "print([ver for ver in [116,113,111,102] if $CUDA_VERSION_MM >= ver][0])")
       pip install "bagua-cuda$CUDA_VERSION_BAGUA"
diff --git a/.azure/ipu-tests.yml b/.azure/ipu-tests.yml
index 379cee7525..51140cb1e3 100644
--- a/.azure/ipu-tests.yml
+++ b/.azure/ipu-tests.yml
@@ -72,7 +72,7 @@ jobs:
       export GIT_TERMINAL_PROMPT=1
       python ./requirements/pytorch/adjust-versions.py requirements/pytorch/extra.txt
       python ./requirements/pytorch/adjust-versions.py requirements/pytorch/examples.txt
-      pip install -e . --requirement ./requirements/pytorch/devel.txt
+      pip install -e .[dev]
       pip list
     env:
       PACKAGE_NAME: pytorch
diff --git a/.github/workflows/docs-checks.yml b/.github/workflows/docs-checks.yml
index 782b899e38..598b504f22 100644
--- a/.github/workflows/docs-checks.yml
+++ b/.github/workflows/docs-checks.yml
@@ -11,6 +11,10 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
   cancel-in-progress: ${{ ! (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release/')) }}

+defaults:
+  run:
+    shell: bash
+
 jobs:
   make-doctest:
     runs-on: ubuntu-20.04
@@ -50,7 +54,6 @@ jobs:
             -r requirements/${{ matrix.pkg }}/docs.txt \
             --find-links https://download.pytorch.org/whl/cpu/torch_stable.html
           pip list
-        shell: bash

       - name: Install dependencies
         env:
@@ -60,15 +63,12 @@ jobs:
           pip install -r requirements/${{ matrix.pkg }}/devel.txt \
             --find-links https://download.pytorch.org/whl/cpu/torch_stable.html
           pip list
-        shell: bash

       - name: Test Documentation
         env:
           SPHINX_MOCK_REQUIREMENTS: 0
         working-directory: ./docs/source-${{ matrix.pkg }}
         run: |
-          # ToDo: proper parametrize
-          # First run the same pipeline as Read-The-Docs
           make doctest
           make coverage

@@ -111,8 +111,6 @@ jobs:
       - name: Make Documentation
         working-directory: ./docs/source-${{ matrix.pkg }}
         run: |
-          # ToDo: rather use python cmd
-          # First run the same pipeline as Read-The-Docs
           make html --debug --jobs $(nproc) SPHINXOPTS="-W --keep-going"

       - name: Upload built docs
diff --git a/docs/source-lit/conf.py b/docs/source-lit/conf.py
index 8d5c4c4746..ecf45c56ab 100644
--- a/docs/source-lit/conf.py
+++ b/docs/source-lit/conf.py
@@ -403,11 +403,15 @@ from torch import nn
 from torch.utils.data import IterableDataset, DataLoader, Dataset
 from pytorch_lightning import LightningDataModule, LightningModule, Trainer, seed_everything
 from pytorch_lightning.callbacks import Callback
-from pytorch_lightning.cli import LightningCLI
+from pytorch_lightning.cli import _JSONARGPARSE_SIGNATURES_AVAILABLE as _JSONARGPARSE_AVAILABLE
 from pytorch_lightning.utilities import (
     _APEX_AVAILABLE,
     _TORCHVISION_AVAILABLE,
     _TORCH_GREATER_EQUAL_1_10,
 )
+from pytorch_lightning.loggers.neptune import _NEPTUNE_AVAILABLE
+from pytorch_lightning.loggers.comet import _COMET_AVAILABLE
+from pytorch_lightning.loggers.mlflow import _MLFLOW_AVAILABLE
+from pytorch_lightning.loggers.wandb import _WANDB_AVAILABLE
 """
 coverage_skip_undoc_in_source = True
diff --git a/docs/source-pytorch/common/precision_intermediate.rst b/docs/source-pytorch/common/precision_intermediate.rst
index 2f3fbdf77f..1b08c88b6d 100644
--- a/docs/source-pytorch/common/precision_intermediate.rst
+++ b/docs/source-pytorch/common/precision_intermediate.rst
@@ -101,7 +101,7 @@ Set the `NVIDIA optimization level `_
 A LightningModule enables your PyTorch nn.Module to play together in complex ways inside the training_step (there is also an optional validation_step and test_step).

 .. testcode::
+    :skipif: not _TORCHVISION_AVAILABLE

     import os
     from torch import optim, nn, utils, Tensor
diff --git a/docs/source-pytorch/visualize/supported_exp_managers.rst b/docs/source-pytorch/visualize/supported_exp_managers.rst
index c0ae4d94e1..948974b47d 100644
--- a/docs/source-pytorch/visualize/supported_exp_managers.rst
+++ b/docs/source-pytorch/visualize/supported_exp_managers.rst
@@ -8,7 +8,8 @@ To use `Comet.ml `_ first install the comet package:

 Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:

-.. code-block:: python
+.. testcode::
+    :skipif: not _COMET_AVAILABLE

     from pytorch_lightning.loggers import CometLogger

@@ -39,7 +40,8 @@ To use `MLflow `_ first install the MLflow package:

 Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:

-.. code-block:: python
+.. testcode::
+    :skipif: not _MLFLOW_AVAILABLE

     from pytorch_lightning.loggers import MLFlowLogger

@@ -76,7 +78,8 @@ or with conda:

 Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:

-.. code-block:: python
+.. testcode::
+    :skipif: not _NEPTUNE_AVAILABLE

     from pytorch_lightning.loggers import NeptuneLogger

@@ -140,7 +143,8 @@ To use `Weights and Biases `_ (wa

 Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:

-.. code-block:: python
+.. testcode::
+    :skipif: not _WANDB_AVAILABLE

     from pytorch_lightning.loggers import WandbLogger

@@ -174,7 +178,8 @@ Use multiple exp managers
 =========================
 To use multiple experiment managers at the same time, pass a list to the *logger* :class:`~pytorch_lightning.trainer.trainer.Trainer` argument.

-.. code-block:: python
+.. testcode::
+    :skipif: not _WANDB_AVAILABLE

     from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger

diff --git a/requirements/README.md b/requirements/README.md
index 24ce056168..1c5403b41e 100644
--- a/requirements/README.md
+++ b/requirements/README.md
@@ -2,7 +2,7 @@

 This root requirements folder branches into sub-folders depending on the python package.
 Within the folder, we have grouped requirements files/lists per focus, which shall closely match package extra
-So, for example, when you install PL as `pip install pytorch-lightning[loggers]`, this list is stored in `requirements/pytorch/loggers.txt`.
+So, for example, when you install PL as `pip install pytorch-lightning[extra]`, this list is stored in `requirements/pytorch/extra.txt`.
 The only exceptional requirement file is `devel.txt`, which aggregated all the needed requirements for development.

 ## CI/CD upper bounds
diff --git a/requirements/pytorch/devel.txt b/requirements/pytorch/devel.txt
index 1169a78e38..fe3159645a 100644
--- a/requirements/pytorch/devel.txt
+++ b/requirements/pytorch/devel.txt
@@ -4,11 +4,5 @@
 # install all extra dependencies for full package testing
 -r ./extra.txt

-# install all loggers for full package testing
--r ./loggers.txt
-
 # extended list of dependencies for development and run lint and tests
 -r ./test.txt
-
-# install all extra dependencies for running examples
--r ./examples.txt
diff --git a/requirements/pytorch/loggers.info b/requirements/pytorch/loggers.info
new file mode 100644
index 0000000000..590d3babcc
--- /dev/null
+++ b/requirements/pytorch/loggers.info
@@ -0,0 +1,5 @@
+# all supported loggers. this list is here as a reference, but they are not installed in CI
+neptune-client
+comet-ml
+mlflow
+wandb
diff --git a/requirements/pytorch/loggers.txt b/requirements/pytorch/loggers.txt
deleted file mode 100644
index 679904c57c..0000000000
--- a/requirements/pytorch/loggers.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-# NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
-# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
-
-# all supported loggers
-neptune-client>=0.10.0, <0.16.10
-comet-ml>=3.1.12, <3.31.16
-mlflow>=1.0.0, <1.29.0
-wandb>=0.10.22, <0.13.4
diff --git a/src/pytorch_lightning/__setup__.py b/src/pytorch_lightning/__setup__.py
index 6e13775541..e2fc772f7e 100644
--- a/src/pytorch_lightning/__setup__.py
+++ b/src/pytorch_lightning/__setup__.py
@@ -33,14 +33,13 @@ def _prepare_extras() -> Dict[str, Any]:
     extras = {
         # 'docs': load_requirements(file_name='docs.txt'),
         "examples": setup_tools.load_requirements(file_name="examples.txt", **common_args),
-        "loggers": setup_tools.load_requirements(file_name="loggers.txt", **common_args),
         "extra": setup_tools.load_requirements(file_name="extra.txt", **common_args),
         "strategies": setup_tools.load_requirements(file_name="strategies.txt", **common_args),
         "test": setup_tools.load_requirements(file_name="test.txt", **common_args),
     }
     for req in parse_requirements(extras["strategies"]):
         extras[req.key] = [str(req)]
-    extras["dev"] = extras["extra"] + extras["loggers"] + extras["test"]
+    extras["dev"] = extras["extra"] + extras["test"]
     extras["all"] = extras["dev"] + extras["examples"] + extras["strategies"]  # + extras['docs']
     return extras

diff --git a/src/pytorch_lightning/loggers/neptune.py b/src/pytorch_lightning/loggers/neptune.py
index 996f96e25c..6dc9b66330 100644
--- a/src/pytorch_lightning/loggers/neptune.py
+++ b/src/pytorch_lightning/loggers/neptune.py
@@ -151,6 +151,7 @@ class NeptuneLogger(Logger):
     You can also pass ``neptune_run_kwargs`` to specify the run in the greater detail, like ``tags`` or ``description``:

     .. testcode::
+        :skipif: not _NEPTUNE_AVAILABLE

         from pytorch_lightning import Trainer
         from pytorch_lightning.loggers import NeptuneLogger
diff --git a/src/pytorch_lightning/loggers/wandb.py b/src/pytorch_lightning/loggers/wandb.py
index d2e496f249..5d60989c65 100644
--- a/src/pytorch_lightning/loggers/wandb.py
+++ b/src/pytorch_lightning/loggers/wandb.py
@@ -41,7 +41,7 @@ try:
     from wandb.wandb_run import Run
 except ModuleNotFoundError:
     # needed for test mocks, these tests shall be updated
-    wandb, Run, RunDisabled = None, None, None  # type: ignore
+    wandb, Run, RunDisabled = None, None, None

 _WANDB_AVAILABLE = RequirementCache("wandb")
 _WANDB_GREATER_EQUAL_0_10_22 = RequirementCache("wandb>=0.10.22")
diff --git a/src/pytorch_lightning/trainer/connectors/checkpoint_connector.py b/src/pytorch_lightning/trainer/connectors/checkpoint_connector.py
index 50480e769b..65d0b365d8 100644
--- a/src/pytorch_lightning/trainer/connectors/checkpoint_connector.py
+++ b/src/pytorch_lightning/trainer/connectors/checkpoint_connector.py
@@ -52,7 +52,7 @@ class CheckpointConnector:
         if resume_from_checkpoint is not None:
             rank_zero_deprecation(
                 "Setting `Trainer(resume_from_checkpoint=)` is deprecated in v1.5 and"
-                " will be removed in v1.7. Please pass `Trainer.fit(ckpt_path=)` directly instead."
+                " will be removed in v2.0. Please pass `Trainer.fit(ckpt_path=)` directly instead."
             )

         self._loaded_checkpoint: Dict[str, Any] = {}
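
A short usage sketch of the installs this patch switches CI to; the extras and pins come from the repository's own requirement files, and the logger packages listed here are only examples taken from the new loggers.info reference list:

    # the "dev" extra now aggregates only extra.txt and test.txt
    pip install -e ".[dev]"
    # GPU jobs additionally install the strategy backends
    pip install -e ".[dev,strategies]"
    # loggers are no longer an install extra; install the ones you need explicitly, e.g.
    pip install neptune-client comet-ml mlflow wandb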