Remove examples and loggers from develop dependencies (#15282)

* Remove examples and loggers from develop dependencies
* remove more references
* Fix mypy
* Keep logger file for docs mocking
* Simpler fix
* Fix docs build
* Global testsetup
* Matching files
* Undo change
* loggers as info
* Clarify
* Update requirements/pytorch/loggers.info

Co-authored-by: Jirka <jirka.borovec@seznam.cz>
Co-authored-by: otaj <6065855+otaj@users.noreply.github.com>

commit 7b3de1215f (parent 76e462a0be)
@@ -22,7 +22,6 @@ REQUIREMENT_FILES = {
     "pytorch": (
         "requirements/pytorch/base.txt",
         "requirements/pytorch/extra.txt",
-        "requirements/pytorch/loggers.txt",
         "requirements/pytorch/strategies.txt",
         "requirements/pytorch/examples.txt",
     ),
@@ -76,8 +76,7 @@ jobs:
           python ./requirements/pytorch/adjust-versions.py requirements/lite/base.txt ${PYTORCH_VERSION}

           CUDA_VERSION_MM=$(python -c "import torch ; print(''.join(map(str, torch.version.cuda.split('.')[:2])))")
-          pip install -e .[strategies] --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html
-          pip install --requirement requirements/pytorch/devel.txt --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html
+          pip install -e .[dev,strategies] --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html

           pip list
         env:
@@ -110,6 +110,7 @@ jobs:
           CUDA_VERSION_MM=$(python -c "import torch ; print(''.join(map(str, torch.version.cuda.split('.')[:2])))")
           pip install -e .[strategies] --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html
           pip install --requirement requirements/pytorch/devel.txt --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html
+          pip install --requirement requirements/pytorch/examples.txt --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html

           CUDA_VERSION_BAGUA=$(python -c "print([ver for ver in [116,113,111,102] if $CUDA_VERSION_MM >= ver][0])")
           pip install "bagua-cuda$CUDA_VERSION_BAGUA"
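As an aside, a minimal Python sketch of what the two inline `python -c` snippets above compute (assuming a CUDA-enabled torch build, so `torch.version.cuda` is a string like "11.6"):

    import torch

    # "11.6" -> "116": the major/minor digits used in PyTorch wheel index URLs (cu116)
    cuda_mm = "".join(torch.version.cuda.split(".")[:2])

    # pick the newest bagua CUDA build that does not exceed the installed toolkit
    cuda_bagua = [ver for ver in [116, 113, 111, 102] if int(cuda_mm) >= ver][0]
    print(f"cu{cuda_mm}", f"bagua-cuda{cuda_bagua}")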
@@ -72,7 +72,7 @@ jobs:
           export GIT_TERMINAL_PROMPT=1
           python ./requirements/pytorch/adjust-versions.py requirements/pytorch/extra.txt
           python ./requirements/pytorch/adjust-versions.py requirements/pytorch/examples.txt
-          pip install -e . --requirement ./requirements/pytorch/devel.txt
+          pip install -e .[dev]
           pip list
         env:
           PACKAGE_NAME: pytorch
@@ -11,6 +11,10 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
   cancel-in-progress: ${{ ! (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release/')) }}

+defaults:
+  run:
+    shell: bash
+
 jobs:
   make-doctest:
     runs-on: ubuntu-20.04
@@ -50,7 +54,6 @@ jobs:
             -r requirements/${{ matrix.pkg }}/docs.txt \
             --find-links https://download.pytorch.org/whl/cpu/torch_stable.html
           pip list
-        shell: bash

       - name: Install dependencies
         env:
@@ -60,15 +63,12 @@ jobs:
           pip install -r requirements/${{ matrix.pkg }}/devel.txt \
             --find-links https://download.pytorch.org/whl/cpu/torch_stable.html
           pip list
-        shell: bash

       - name: Test Documentation
         env:
           SPHINX_MOCK_REQUIREMENTS: 0
         working-directory: ./docs/source-${{ matrix.pkg }}
         run: |
-          # ToDo: proper parametrize
-          # First run the same pipeline as Read-The-Docs
           make doctest
           make coverage
@@ -111,8 +111,6 @@ jobs:
       - name: Make Documentation
         working-directory: ./docs/source-${{ matrix.pkg }}
         run: |
-          # ToDo: rather use python cmd
-          # First run the same pipeline as Read-The-Docs
           make html --debug --jobs $(nproc) SPHINXOPTS="-W --keep-going"

       - name: Upload built docs
@@ -403,11 +403,15 @@ from torch import nn
 from torch.utils.data import IterableDataset, DataLoader, Dataset
 from pytorch_lightning import LightningDataModule, LightningModule, Trainer, seed_everything
 from pytorch_lightning.callbacks import Callback
-from pytorch_lightning.cli import LightningCLI
+from pytorch_lightning.cli import _JSONARGPARSE_SIGNATURES_AVAILABLE as _JSONARGPARSE_AVAILABLE
 from pytorch_lightning.utilities import (
     _APEX_AVAILABLE,
     _TORCHVISION_AVAILABLE,
     _TORCH_GREATER_EQUAL_1_10,
 )
+from pytorch_lightning.loggers.neptune import _NEPTUNE_AVAILABLE
+from pytorch_lightning.loggers.comet import _COMET_AVAILABLE
+from pytorch_lightning.loggers.mlflow import _MLFLOW_AVAILABLE
+from pytorch_lightning.loggers.wandb import _WANDB_AVAILABLE
 """
coverage_skip_undoc_in_source = True
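These `_*_AVAILABLE` flags are what the `:skipif:` options in the testcode directives below evaluate. Conceptually, each one is a boolean-ish check that the optional dependency is importable; a rough stand-in for the idea (the real flags are built on the project's requirement-checking utilities, e.g. `RequirementCache`) looks like:

    import importlib.util

    def _package_available(name: str) -> bool:
        # True when the optional dependency can be imported in this environment
        return importlib.util.find_spec(name) is not None

    # e.g. gates ".. testcode:: / :skipif: not _COMET_AVAILABLE" in the docs
    _COMET_AVAILABLE = _package_available("comet_ml")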
@@ -101,7 +101,7 @@ Set the `NVIDIA optimization level <https://nvidia.github.io/apex/amp.html#opt-l
 .. testcode::
     :skipif: not _APEX_AVAILABLE or not torch.cuda.is_available()

-    from pytorch_lightning.plugins.apex_amp import ApexMixedPrecisionPlugin
+    from pytorch_lightning.plugins import ApexMixedPrecisionPlugin


     apex_plugin = ApexMixedPrecisionPlugin(amp_level="O3")
@@ -1173,7 +1173,7 @@ Half precision, or mixed precision, is the combined use of 32 and 16 bit floatin
 .. testcode::
     :skipif: not _APEX_AVAILABLE or not torch.cuda.is_available()

-    from pytorch_lightning.plugins.apex_amp import ApexMixedPrecisionPlugin
+    from pytorch_lightning.plugins import ApexMixedPrecisionPlugin


    apex_plugin = ApexMixedPrecisionPlugin(amp_level="O2")
@@ -333,8 +333,6 @@ PACKAGE_MAPPING = {
     "Pillow": "PIL",
     "opencv-python": "cv2",
     "PyYAML": "yaml",
-    "comet-ml": "comet_ml",
-    "neptune-client": "neptune",
     "hydra-core": "hydra",
 }
 MOCK_PACKAGES = []
@@ -343,7 +341,6 @@ if SPHINX_MOCK_REQUIREMENTS:
     # mock also base packages when we are on RTD since we don't install them there
     MOCK_PACKAGES += package_list_from_file(_path_require("base.txt"))
     MOCK_PACKAGES += package_list_from_file(_path_require("extra.txt"))
-    MOCK_PACKAGES += package_list_from_file(_path_require("loggers.txt"))
     MOCK_PACKAGES += package_list_from_file(_path_require("strategies.txt"))
 MOCK_PACKAGES = [PACKAGE_MAPPING.get(pkg, pkg) for pkg in MOCK_PACKAGES]
@@ -397,5 +394,9 @@ from pytorch_lightning.utilities import (
     _TORCHVISION_AVAILABLE,
     _TORCH_GREATER_EQUAL_1_10,
 )
+from pytorch_lightning.loggers.neptune import _NEPTUNE_AVAILABLE
+from pytorch_lightning.loggers.comet import _COMET_AVAILABLE
+from pytorch_lightning.loggers.mlflow import _MLFLOW_AVAILABLE
+from pytorch_lightning.loggers.wandb import _WANDB_AVAILABLE
 """
coverage_skip_undoc_in_source = True
@@ -71,6 +71,7 @@ You can also pass a custom Logger to the :class:`~pytorch_lightning.trainer.trai
 Choose from any of the others such as MLflow, Comet, Neptune, WandB, etc.

 .. testcode::
+    :skipif: not _COMET_AVAILABLE

     comet_logger = pl_loggers.CometLogger(save_dir="logs/")
     trainer = Trainer(logger=comet_logger)
@@ -78,6 +79,7 @@ Choose from any of the others such as MLflow, Comet, Neptune, WandB, etc.
 To use multiple loggers, simply pass in a ``list`` or ``tuple`` of loggers.

 .. testcode::
+    :skipif: not _COMET_AVAILABLE

     tb_logger = pl_loggers.TensorBoardLogger(save_dir="logs/")
     comet_logger = pl_loggers.CometLogger(save_dir="logs/")
@@ -112,6 +112,7 @@ Or read the `advanced install guide <installation.html>`_
 A LightningModule enables your PyTorch nn.Module to play together in complex ways inside the training_step (there is also an optional validation_step and test_step).

 .. testcode::
+    :skipif: not _TORCHVISION_AVAILABLE

     import os
     from torch import optim, nn, utils, Tensor
@@ -8,7 +8,8 @@ To use `Comet.ml <https://www.comet.ml/site/>`_ first install the comet package:

 Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:

-.. code-block:: python
+.. testcode::
+    :skipif: not _COMET_AVAILABLE

     from pytorch_lightning.loggers import CometLogger
|
|||
|
||||
Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:
|
||||
|
||||
.. code-block:: python
|
||||
.. testcode::
|
||||
:skipif: not _MLFLOW_AVAILABLE
|
||||
|
||||
from pytorch_lightning.loggers import MLFlowLogger
|
||||
|
||||
|
@@ -76,7 +78,8 @@ or with conda:

 Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:

-.. code-block:: python
+.. testcode::
+    :skipif: not _NEPTUNE_AVAILABLE

     from pytorch_lightning.loggers import NeptuneLogger
@@ -140,7 +143,8 @@ To use `Weights and Biases <https://docs.wandb.ai/integrations/lightning/>`_ (wa

 Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:

-.. code-block:: python
+.. testcode::
+    :skipif: not _WANDB_AVAILABLE

     from pytorch_lightning.loggers import WandbLogger
@@ -174,7 +178,8 @@ Use multiple exp managers
 =========================
 To use multiple experiment managers at the same time, pass a list to the *logger* :class:`~pytorch_lightning.trainer.trainer.Trainer` argument.

-.. code-block:: python
+.. testcode::
+    :skipif: not _WANDB_AVAILABLE

     from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger
@@ -2,7 +2,7 @@

 This root requirements folder branches into sub-folders depending on the python package.
 Within the folder, we have grouped requirements files/lists per focus, which shall closely match package extra
-So, for example, when you install PL as `pip install pytorch-lightning[loggers]`, this list is stored in `requirements/pytorch/loggers.txt`.
+So, for example, when you install PL as `pip install pytorch-lightning[extra]`, this list is stored in `requirements/pytorch/extra.txt`.
 The only exceptional requirement file is `devel.txt`, which aggregated all the needed requirements for development.

 ## CI/CD upper bounds
@@ -4,11 +4,5 @@
 # install all extra dependencies for full package testing
 -r ./extra.txt

-# install all loggers for full package testing
--r ./loggers.txt
-
 # extended list of dependencies for development and run lint and tests
 -r ./test.txt
-
-# install all extra dependencies for running examples
--r ./examples.txt
@@ -0,0 +1,5 @@
+# all supported loggers. this list is here as a reference, but they are not installed in CI
+neptune-client
+comet-ml
+mlflow
+wandb
@@ -1,8 +0,0 @@
-# NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
-# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
-
-# all supported loggers
-neptune-client>=0.10.0, <0.16.10
-comet-ml>=3.1.12, <3.31.16
-mlflow>=1.0.0, <1.29.0
-wandb>=0.10.22, <0.13.4
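The new `.info` file is a plain reference list: unlike the deleted `loggers.txt`, it carries no version bounds and is not wired into the package extras or CI installs. A hypothetical helper in the spirit of the `package_list_from_file` used by the docs configs above could still read such a list, e.g. for mocking:

    def package_list_from_file(path: str) -> list:
        # collect bare package names, skipping blanks and comment lines
        with open(path) as fh:
            lines = (ln.strip() for ln in fh)
            return [ln for ln in lines if ln and not ln.startswith("#")]

    # e.g. package_list_from_file("requirements/pytorch/loggers.info")
    # -> ["neptune-client", "comet-ml", "mlflow", "wandb"]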
@@ -33,14 +33,13 @@ def _prepare_extras() -> Dict[str, Any]:
     extras = {
         # 'docs': load_requirements(file_name='docs.txt'),
         "examples": setup_tools.load_requirements(file_name="examples.txt", **common_args),
-        "loggers": setup_tools.load_requirements(file_name="loggers.txt", **common_args),
         "extra": setup_tools.load_requirements(file_name="extra.txt", **common_args),
         "strategies": setup_tools.load_requirements(file_name="strategies.txt", **common_args),
         "test": setup_tools.load_requirements(file_name="test.txt", **common_args),
     }
     for req in parse_requirements(extras["strategies"]):
         extras[req.key] = [str(req)]
-    extras["dev"] = extras["extra"] + extras["loggers"] + extras["test"]
+    extras["dev"] = extras["extra"] + extras["test"]
     extras["all"] = extras["dev"] + extras["examples"] + extras["strategies"]  # + extras['docs']
     return extras
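Net effect on the published extras, sketched with toy requirement lists (illustrative package names only, not the real pinned lists): `loggers` disappears as an extra, `dev` shrinks to `extra` plus `test`, and `examples` survives only inside `all`.

    # toy model of _prepare_extras() after this change
    extra = ["matplotlib"]         # requirements/pytorch/extra.txt
    test = ["pytest", "coverage"]  # requirements/pytorch/test.txt
    examples = ["torchvision"]     # requirements/pytorch/examples.txt
    strategies = ["deepspeed"]     # requirements/pytorch/strategies.txt

    dev = extra + test                  # no more loggers/examples in [dev]
    all_ = dev + examples + strategies  # [all] still covers everything
    print(dev)  # -> ['matplotlib', 'pytest', 'coverage']

So `pip install "pytorch-lightning[dev]"` no longer pulls in the logger or example packages.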
@@ -151,6 +151,7 @@ class NeptuneLogger(Logger):
     You can also pass ``neptune_run_kwargs`` to specify the run in the greater detail, like ``tags`` or ``description``:

     .. testcode::
+        :skipif: not _NEPTUNE_AVAILABLE

         from pytorch_lightning import Trainer
         from pytorch_lightning.loggers import NeptuneLogger
@@ -41,7 +41,7 @@ try:
     from wandb.wandb_run import Run
 except ModuleNotFoundError:
     # needed for test mocks, these tests shall be updated
-    wandb, Run, RunDisabled = None, None, None  # type: ignore
+    wandb, Run, RunDisabled = None, None, None

 _WANDB_AVAILABLE = RequirementCache("wandb")
 _WANDB_GREATER_EQUAL_0_10_22 = RequirementCache("wandb>=0.10.22")
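For context, `RequirementCache` (used just above) evaluates whether a requirement string is satisfied by the installed environment; a minimal stand-in with the same flavor, assuming only the standard packaging toolchain:

    import pkg_resources

    def _requirement_available(requirement: str) -> bool:
        # True when an installed distribution satisfies the version specifier
        try:
            pkg_resources.require(requirement)  # e.g. "wandb>=0.10.22"
            return True
        except Exception:
            return False

    _WANDB_GREATER_EQUAL_0_10_22 = _requirement_available("wandb>=0.10.22")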
@@ -52,7 +52,7 @@ class CheckpointConnector:
         if resume_from_checkpoint is not None:
             rank_zero_deprecation(
                 "Setting `Trainer(resume_from_checkpoint=)` is deprecated in v1.5 and"
-                " will be removed in v1.7. Please pass `Trainer.fit(ckpt_path=)` directly instead."
+                " will be removed in v2.0. Please pass `Trainer.fit(ckpt_path=)` directly instead."
             )
         self._loaded_checkpoint: Dict[str, Any] = {}
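The updated message points at the replacement API; a hedged before/after sketch (the model and checkpoint path are placeholders for any LightningModule and checkpoint):

    from pytorch_lightning import Trainer

    def resume_training(model, ckpt_path="path/to/checkpoint.ckpt"):
        # deprecated since v1.5, now slated for removal in v2.0:
        #     Trainer(resume_from_checkpoint=ckpt_path).fit(model)
        # preferred: hand the checkpoint path to fit() directly
        trainer = Trainer()
        trainer.fit(model, ckpt_path=ckpt_path)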