From 7d4780adb110f3dd0dcc1f5543d8b24dda6c4958 Mon Sep 17 00:00:00 2001 From: Jirka Borovec <6035284+Borda@users.noreply.github.com> Date: Thu, 2 Feb 2023 03:22:42 +0900 Subject: [PATCH] move pytorch_lightning >> lightning/pytorch (#16594) --- .azure/gpu-tests-pytorch.yml | 3 +- .azure/hpu-tests.yml | 3 +- .azure/ipu-tests.yml | 3 +- .github/checkgroup.yml | 15 +- .github/labeler.yml | 3 +- .github/workflows/ci-tests-pytorch.yml | 3 +- .github/workflows/tpu-tests.yml | 3 +- .gitignore | 5 +- .pre-commit-config.yaml | 7 +- setup.py | 4 +- .../pytorch}/CHANGELOG.md | 2 +- .../pytorch}/__init__.py | 20 ++- .../pytorch}/_graveyard/__init__.py | 2 +- .../_graveyard/legacy_import_unpickler.py | 4 +- .../pytorch}/accelerators/__init__.py | 20 +-- .../pytorch}/accelerators/accelerator.py | 6 +- .../pytorch}/accelerators/cpu.py | 10 +- .../pytorch}/accelerators/cuda.py | 12 +- .../pytorch}/accelerators/hpu.py | 12 +- .../pytorch}/accelerators/ipu.py | 6 +- .../pytorch}/accelerators/mps.py | 12 +- .../pytorch}/accelerators/tpu.py | 8 +- .../pytorch}/callbacks/__init__.py | 40 +++--- .../pytorch}/callbacks/batch_size_finder.py | 16 +-- .../pytorch}/callbacks/callback.py | 12 +- .../pytorch}/callbacks/checkpoint.py | 4 +- .../callbacks/device_stats_monitor.py | 18 +-- .../pytorch}/callbacks/early_stopping.py | 16 +-- .../pytorch}/callbacks/finetuning.py | 12 +- .../gradient_accumulation_scheduler.py | 10 +- .../pytorch}/callbacks/lambda_function.py | 8 +- .../pytorch}/callbacks/lr_finder.py | 12 +- .../pytorch}/callbacks/lr_monitor.py | 14 +- .../pytorch}/callbacks/model_checkpoint.py | 26 ++-- .../pytorch}/callbacks/model_summary.py | 22 +-- .../callbacks/on_exception_checkpoint.py | 10 +- .../pytorch}/callbacks/prediction_writer.py | 12 +- .../pytorch}/callbacks/progress/__init__.py | 6 +- .../pytorch}/callbacks/progress/base.py | 14 +- .../callbacks/progress/rich_progress.py | 10 +- .../callbacks/progress/tqdm_progress.py | 16 +-- .../pytorch}/callbacks/pruning.py | 10 +- .../pytorch}/callbacks/quantization.py | 8 +- .../pytorch}/callbacks/rich_model_summary.py | 18 +-- .../callbacks/stochastic_weight_avg.py | 16 +-- .../pytorch}/callbacks/timer.py | 16 +-- .../pytorch}/cli.py | 36 ++--- .../pytorch}/core/__init__.py | 4 +- .../pytorch}/core/datamodule.py | 16 +-- .../pytorch}/core/hooks.py | 22 +-- .../pytorch}/core/mixins/__init__.py | 2 +- .../pytorch}/core/mixins/hparams_mixin.py | 12 +- .../pytorch}/core/module.py | 66 ++++----- .../pytorch}/core/optimizer.py | 14 +- .../pytorch}/core/saving.py | 18 +-- .../pytorch}/demos/__init__.py | 0 .../pytorch}/demos/boring_classes.py | 8 +- .../pytorch}/demos/mnist_datamodule.py | 6 +- .../pytorch}/loggers/__init__.py | 14 +- .../pytorch}/loggers/comet.py | 20 +-- .../pytorch}/loggers/csv_logs.py | 22 +-- .../pytorch}/loggers/logger.py | 8 +- .../pytorch}/loggers/mlflow.py | 24 ++-- .../pytorch}/loggers/neptune.py | 30 ++-- .../pytorch}/loggers/tensorboard.py | 24 ++-- .../pytorch}/loggers/wandb.py | 32 ++--- .../pytorch}/loops/__init__.py | 10 +- .../pytorch}/loops/dataloader/__init__.py | 6 +- .../loops/dataloader/dataloader_loop.py | 4 +- .../loops/dataloader/evaluation_loop.py | 22 +-- .../loops/dataloader/prediction_loop.py | 12 +- .../pytorch}/loops/epoch/__init__.py | 6 +- .../loops/epoch/evaluation_epoch_loop.py | 14 +- .../loops/epoch/prediction_epoch_loop.py | 12 +- .../loops/epoch/training_epoch_loop.py | 30 ++-- .../pytorch}/loops/fit_loop.py | 26 ++-- .../pytorch}/loops/loop.py | 4 +- .../pytorch}/loops/optimization/__init__.py | 4 +- 
.../pytorch}/loops/optimization/closure.py | 2 +- .../loops/optimization/manual_loop.py | 18 +-- .../loops/optimization/optimizer_loop.py | 20 +-- .../pytorch}/loops/progress.py | 0 .../pytorch}/loops/utilities.py | 20 +-- .../pytorch}/overrides/__init__.py | 0 .../pytorch}/overrides/base.py | 4 +- .../pytorch}/overrides/data_parallel.py | 6 +- .../pytorch}/overrides/distributed.py | 2 +- .../pytorch}/overrides/torch_distributed.py | 0 src/lightning/pytorch/plugins/__init__.py | 39 ++++++ .../pytorch}/plugins/environments/__init__.py | 6 +- .../plugins/environments/bagua_environment.py | 2 +- .../pytorch}/plugins/io/__init__.py | 6 +- .../pytorch}/plugins/io/async_plugin.py | 4 +- .../pytorch}/plugins/io/checkpoint_plugin.py | 2 +- .../pytorch}/plugins/io/hpu_plugin.py | 8 +- .../pytorch}/plugins/io/torch_plugin.py | 2 +- .../pytorch}/plugins/io/wrapper.py | 2 +- .../pytorch}/plugins/io/xla_plugin.py | 2 +- .../pytorch}/plugins/layer_sync.py | 0 .../pytorch}/plugins/precision/__init__.py | 20 +-- .../pytorch}/plugins/precision/amp.py | 12 +- .../pytorch}/plugins/precision/colossalai.py | 8 +- .../pytorch}/plugins/precision/deepspeed.py | 16 +-- .../pytorch}/plugins/precision/double.py | 8 +- .../pytorch}/plugins/precision/fsdp.py | 6 +- .../pytorch}/plugins/precision/hpu.py | 6 +- .../pytorch}/plugins/precision/ipu.py | 14 +- .../plugins/precision/precision_plugin.py | 10 +- .../pytorch}/plugins/precision/tpu.py | 10 +- .../pytorch}/plugins/precision/tpu_bf16.py | 2 +- .../pytorch}/profilers/__init__.py | 12 +- .../pytorch}/profilers/advanced.py | 4 +- .../pytorch}/profilers/base.py | 2 +- .../pytorch}/profilers/profiler.py | 2 +- .../pytorch}/profilers/pytorch.py | 18 +-- .../pytorch}/profilers/simple.py | 4 +- .../pytorch}/profilers/xla.py | 4 +- src/lightning/pytorch/serve/__init__.py | 4 + .../pytorch}/serve/servable_module.py | 6 +- .../serve/servable_module_validator.py | 14 +- src/lightning/pytorch/strategies/__init__.py | 34 +++++ .../pytorch}/strategies/bagua.py | 22 +-- .../pytorch}/strategies/colossalai.py | 28 ++-- .../pytorch}/strategies/ddp.py | 40 +++--- .../pytorch}/strategies/ddp_spawn.py | 36 ++--- .../pytorch}/strategies/deepspeed.py | 38 +++--- .../pytorch}/strategies/dp.py | 20 +-- .../pytorch}/strategies/fsdp.py | 42 +++--- .../pytorch}/strategies/hpu_parallel.py | 24 ++-- .../pytorch}/strategies/ipu.py | 28 ++-- .../pytorch}/strategies/launchers/__init__.py | 6 +- .../pytorch}/strategies/launchers/launcher.py | 4 +- .../strategies/launchers/multiprocessing.py | 24 ++-- .../strategies/launchers/subprocess_script.py | 12 +- .../pytorch}/strategies/launchers/xla.py | 16 +-- .../pytorch}/strategies/parallel.py | 12 +- .../pytorch}/strategies/single_device.py | 10 +- .../pytorch}/strategies/single_hpu.py | 20 +-- .../pytorch}/strategies/single_tpu.py | 14 +- .../pytorch}/strategies/strategy.py | 34 ++--- .../pytorch}/strategies/tpu_spawn.py | 38 +++--- .../pytorch}/strategies/utils.py | 8 +- .../pytorch}/trainer/__init__.py | 4 +- .../pytorch}/trainer/call.py | 8 +- .../trainer/configuration_validator.py | 18 +-- .../pytorch}/trainer/connectors/__init__.py | 0 .../connectors/accelerator_connector.py | 38 +++--- .../trainer/connectors/callback_connector.py | 24 ++-- .../connectors/checkpoint_connector.py | 28 ++-- .../trainer/connectors/data_connector.py | 36 ++--- .../connectors/logger_connector/__init__.py | 1 + .../logger_connector/fx_validator.py | 2 +- .../logger_connector/logger_connector.py | 16 +-- .../connectors/logger_connector/result.py | 24 ++-- 
.../trainer/connectors/signal_connector.py | 10 +- .../pytorch}/trainer/setup.py | 16 +-- .../pytorch}/trainer/states.py | 12 +- .../pytorch}/trainer/supporters.py | 8 +- .../pytorch}/trainer/trainer.py | 128 +++++++++--------- .../pytorch}/tuner/__init__.py | 2 +- .../pytorch}/tuner/batch_size_scaling.py | 16 +-- .../pytorch}/tuner/lr_finder.py | 18 +-- .../pytorch}/tuner/tuning.py | 24 ++-- .../pytorch}/utilities/__init__.py | 16 +-- .../pytorch}/utilities/argparse.py | 18 +-- .../pytorch}/utilities/data.py | 20 +-- .../pytorch}/utilities/deepspeed.py | 6 +- .../pytorch}/utilities/distributed.py | 4 +- .../pytorch}/utilities/enums.py | 2 +- .../pytorch}/utilities/exceptions.py | 6 +- .../pytorch}/utilities/fetching.py | 6 +- .../pytorch}/utilities/finite_checks.py | 0 .../pytorch}/utilities/grads.py | 0 .../pytorch}/utilities/imports.py | 0 .../pytorch}/utilities/logger.py | 2 +- .../pytorch}/utilities/memory.py | 0 .../pytorch}/utilities/meta.py | 0 .../pytorch}/utilities/metrics.py | 2 +- .../pytorch}/utilities/migration/__init__.py | 4 +- .../pytorch}/utilities/migration/migration.py | 12 +- .../pytorch}/utilities/migration/utils.py | 24 ++-- .../pytorch}/utilities/model_helpers.py | 4 +- .../utilities/model_summary/__init__.py | 4 +- .../utilities/model_summary/model_summary.py | 10 +- .../model_summary/model_summary_deepspeed.py | 2 +- .../pytorch}/utilities/parameter_tying.py | 0 .../pytorch}/utilities/parsing.py | 8 +- .../pytorch}/utilities/rank_zero.py | 4 +- .../pytorch}/utilities/seed.py | 2 +- .../pytorch}/utilities/signature_utils.py | 0 .../pytorch}/utilities/types.py | 2 +- .../pytorch}/utilities/upgrade_checkpoint.py | 2 +- .../pytorch}/utilities/warnings.py | 2 +- src/pytorch_lightning/plugins/__init__.py | 39 ------ src/pytorch_lightning/serve/__init__.py | 4 - src/pytorch_lightning/strategies/__init__.py | 34 ----- .../connectors/logger_connector/__init__.py | 1 - 197 files changed, 1283 insertions(+), 1264 deletions(-) rename src/{pytorch_lightning => lightning/pytorch}/CHANGELOG.md (100%) rename src/{pytorch_lightning => lightning/pytorch}/__init__.py (73%) rename src/{pytorch_lightning => lightning/pytorch}/_graveyard/__init__.py (90%) rename src/{pytorch_lightning => lightning/pytorch}/_graveyard/legacy_import_unpickler.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/accelerators/__init__.py (56%) rename src/{pytorch_lightning => lightning/pytorch}/accelerators/accelerator.py (89%) rename src/{pytorch_lightning => lightning/pytorch}/accelerators/cpu.py (89%) rename src/{pytorch_lightning => lightning/pytorch}/accelerators/cuda.py (93%) rename src/{pytorch_lightning => lightning/pytorch}/accelerators/hpu.py (91%) rename src/{pytorch_lightning => lightning/pytorch}/accelerators/ipu.py (91%) rename src/{pytorch_lightning => lightning/pytorch}/accelerators/mps.py (88%) rename src/{pytorch_lightning => lightning/pytorch}/accelerators/tpu.py (91%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/__init__.py (52%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/batch_size_finder.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/callback.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/checkpoint.py (66%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/device_stats_monitor.py (91%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/early_stopping.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/finetuning.py (98%) rename src/{pytorch_lightning 
=> lightning/pytorch}/callbacks/gradient_accumulation_scheduler.py (93%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/lambda_function.py (93%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/lr_finder.py (92%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/lr_monitor.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/model_checkpoint.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/model_summary.py (78%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/on_exception_checkpoint.py (88%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/prediction_writer.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/progress/__init__.py (79%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/progress/base.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/progress/rich_progress.py (98%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/progress/tqdm_progress.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/pruning.py (98%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/quantization.py (98%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/rich_model_summary.py (85%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/stochastic_weight_avg.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/callbacks/timer.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/cli.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/core/__init__.py (84%) rename src/{pytorch_lightning => lightning/pytorch}/core/datamodule.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/core/hooks.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/core/mixins/__init__.py (90%) rename src/{pytorch_lightning => lightning/pytorch}/core/mixins/hparams_mixin.py (93%) rename src/{pytorch_lightning => lightning/pytorch}/core/module.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/core/optimizer.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/core/saving.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/demos/__init__.py (100%) rename src/{pytorch_lightning => lightning/pytorch}/demos/boring_classes.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/demos/mnist_datamodule.py (98%) rename src/{pytorch_lightning => lightning/pytorch}/loggers/__init__.py (68%) rename src/{pytorch_lightning => lightning/pytorch}/loggers/comet.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/loggers/csv_logs.py (87%) rename src/{pytorch_lightning => lightning/pytorch}/loggers/logger.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/loggers/mlflow.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/loggers/neptune.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/loggers/tensorboard.py (93%) rename src/{pytorch_lightning => lightning/pytorch}/loggers/wandb.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/loops/__init__.py (70%) rename src/{pytorch_lightning => lightning/pytorch}/loops/dataloader/__init__.py (77%) rename src/{pytorch_lightning => lightning/pytorch}/loops/dataloader/dataloader_loop.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/loops/dataloader/evaluation_loop.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/loops/dataloader/prediction_loop.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/loops/epoch/__init__.py (77%) rename 
src/{pytorch_lightning => lightning/pytorch}/loops/epoch/evaluation_epoch_loop.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/loops/epoch/prediction_epoch_loop.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/loops/epoch/training_epoch_loop.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/loops/fit_loop.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/loops/loop.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/loops/optimization/__init__.py (83%) rename src/{pytorch_lightning => lightning/pytorch}/loops/optimization/closure.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/loops/optimization/manual_loop.py (90%) rename src/{pytorch_lightning => lightning/pytorch}/loops/optimization/optimizer_loop.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/loops/progress.py (100%) rename src/{pytorch_lightning => lightning/pytorch}/loops/utilities.py (88%) rename src/{pytorch_lightning => lightning/pytorch}/overrides/__init__.py (100%) rename src/{pytorch_lightning => lightning/pytorch}/overrides/base.py (98%) rename src/{pytorch_lightning => lightning/pytorch}/overrides/data_parallel.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/overrides/distributed.py (98%) rename src/{pytorch_lightning => lightning/pytorch}/overrides/torch_distributed.py (100%) create mode 100644 src/lightning/pytorch/plugins/__init__.py rename src/{pytorch_lightning => lightning/pytorch}/plugins/environments/__init__.py (79%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/environments/bagua_environment.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/io/__init__.py (78%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/io/async_plugin.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/io/checkpoint_plugin.py (90%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/io/hpu_plugin.py (89%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/io/torch_plugin.py (90%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/io/wrapper.py (98%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/io/xla_plugin.py (90%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/layer_sync.py (100%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/precision/__init__.py (61%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/precision/amp.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/precision/colossalai.py (93%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/precision/deepspeed.py (90%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/precision/double.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/precision/fsdp.py (93%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/precision/hpu.py (92%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/precision/ipu.py (89%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/precision/precision_plugin.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/precision/tpu.py (89%) rename src/{pytorch_lightning => lightning/pytorch}/plugins/precision/tpu_bf16.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/profilers/__init__.py (66%) rename src/{pytorch_lightning => lightning/pytorch}/profilers/advanced.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/profilers/base.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/profilers/profiler.py (98%) rename 
src/{pytorch_lightning => lightning/pytorch}/profilers/pytorch.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/profilers/simple.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/profilers/xla.py (95%) create mode 100644 src/lightning/pytorch/serve/__init__.py rename src/{pytorch_lightning => lightning/pytorch}/serve/servable_module.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/serve/servable_module_validator.py (94%) create mode 100644 src/lightning/pytorch/strategies/__init__.py rename src/{pytorch_lightning => lightning/pytorch}/strategies/bagua.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/colossalai.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/ddp.py (93%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/ddp_spawn.py (92%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/deepspeed.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/dp.py (90%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/fsdp.py (93%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/hpu_parallel.py (90%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/ipu.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/launchers/__init__.py (79%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/launchers/launcher.py (86%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/launchers/multiprocessing.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/launchers/subprocess_script.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/launchers/xla.py (91%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/parallel.py (93%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/single_device.py (91%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/single_hpu.py (86%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/single_tpu.py (86%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/strategy.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/tpu_spawn.py (91%) rename src/{pytorch_lightning => lightning/pytorch}/strategies/utils.py (85%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/__init__.py (84%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/call.py (91%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/configuration_validator.py (93%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/connectors/__init__.py (100%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/connectors/accelerator_connector.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/connectors/callback_connector.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/connectors/checkpoint_connector.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/connectors/data_connector.py (95%) create mode 100644 src/lightning/pytorch/trainer/connectors/logger_connector/__init__.py rename src/{pytorch_lightning => lightning/pytorch}/trainer/connectors/logger_connector/fx_validator.py (99%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/connectors/logger_connector/logger_connector.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/connectors/logger_connector/result.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/connectors/signal_connector.py (96%) rename src/{pytorch_lightning 
=> lightning/pytorch}/trainer/setup.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/states.py (84%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/supporters.py (98%) rename src/{pytorch_lightning => lightning/pytorch}/trainer/trainer.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/tuner/__init__.py (90%) rename src/{pytorch_lightning => lightning/pytorch}/tuner/batch_size_scaling.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/tuner/lr_finder.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/tuner/tuning.py (92%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/__init__.py (68%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/argparse.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/data.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/deepspeed.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/distributed.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/enums.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/exceptions.py (84%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/fetching.py (98%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/finite_checks.py (100%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/grads.py (100%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/imports.py (100%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/logger.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/memory.py (100%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/meta.py (100%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/metrics.py (94%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/migration/__init__.py (83%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/migration/migration.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/migration/utils.py (89%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/model_helpers.py (95%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/model_summary/__init__.py (85%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/model_summary/model_summary.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/model_summary/model_summary_deepspeed.py (98%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/parameter_tying.py (100%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/parsing.py (98%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/rank_zero.py (88%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/seed.py (96%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/signature_utils.py (100%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/types.py (98%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/upgrade_checkpoint.py (97%) rename src/{pytorch_lightning => lightning/pytorch}/utilities/warnings.py (91%) delete mode 100644 src/pytorch_lightning/plugins/__init__.py delete mode 100644 src/pytorch_lightning/serve/__init__.py delete mode 100644 src/pytorch_lightning/strategies/__init__.py delete mode 100644 src/pytorch_lightning/trainer/connectors/logger_connector/__init__.py diff --git a/.azure/gpu-tests-pytorch.yml b/.azure/gpu-tests-pytorch.yml index 5771c58b58..8a3b45ac2c 100644 --- a/.azure/gpu-tests-pytorch.yml +++ b/.azure/gpu-tests-pytorch.yml 
@@ -26,7 +26,8 @@ pr: - "examples/pl_basics/backbone_image_classifier.py" - "examples/pl_basics/autoencoder.py" - "requirements/pytorch/**" - - "src/pytorch_lightning/**" + - "src/lightning/pytorch/**" + - "src/pytorch_lightning/*" - "tests/tests_pytorch/**" - "pyproject.toml" # includes pytest config - "requirements/fabric/**" diff --git a/.azure/hpu-tests.yml b/.azure/hpu-tests.yml index 250cfea944..6ac9e4870d 100644 --- a/.azure/hpu-tests.yml +++ b/.azure/hpu-tests.yml @@ -24,7 +24,8 @@ pr: - "src/lightning/fabric/**" - "src/lightning_fabric/*" - "requirements/pytorch/**" - - "src/pytorch_lightning/**" + - "src/lightning/pytorch/**" + - "src/pytorch_lightning/*" - "tests/tests_pytorch/**" - "pyproject.toml" # includes pytest config exclude: diff --git a/.azure/ipu-tests.yml b/.azure/ipu-tests.yml index 8272facb16..afcab05dd1 100644 --- a/.azure/ipu-tests.yml +++ b/.azure/ipu-tests.yml @@ -21,7 +21,8 @@ pr: - "src/lightning/fabric/**" - "src/lightning_fabric/*" - "requirements/pytorch/**" - - "src/pytorch_lightning/**" + - "src/lightning/pytorch/**" + - "src/pytorch_lightning/*" - "tests/tests_pytorch/**" - "pyproject.toml" # includes pytest config exclude: diff --git a/.github/checkgroup.yml b/.github/checkgroup.yml index dcd9b9406f..e03512c845 100644 --- a/.github/checkgroup.yml +++ b/.github/checkgroup.yml @@ -11,7 +11,8 @@ subprojects: - "src/lightning/fabric/**" - "src/lightning_fabric/*" - "requirements/pytorch/**" - - "src/pytorch_lightning/**" + - "src/lightning/pytorch/**" + - "src/pytorch_lightning/*" - "tests/tests_pytorch/**" - "tests/legacy/**" - "pyproject.toml" # includes pytest config @@ -49,7 +50,8 @@ subprojects: - "examples/pl_basics/backbone_image_classifier.py" - "examples/pl_basics/autoencoder.py" - "requirements/pytorch/**" - - "src/pytorch_lightning/**" + - "src/lightning/pytorch/**" + - "src/pytorch_lightning/*" - "tests/tests_pytorch/**" - "pyproject.toml" # includes pytest config - "requirements/fabric/**" @@ -82,7 +84,8 @@ subprojects: - "src/lightning/fabric/**" - "src/lightning_fabric/*" - "requirements/pytorch/**" - - "src/pytorch_lightning/**" + - "src/lightning/pytorch/**" + - "src/pytorch_lightning/*" - "tests/tests_pytorch/**" - "pyproject.toml" # includes pytest config - "!requirements/*/docs.txt" @@ -99,7 +102,8 @@ subprojects: - "src/lightning/fabric/**" - "src/lightning_fabric/*" - "requirements/pytorch/**" - - "src/pytorch_lightning/**" + - "src/lightning/pytorch/**" + - "src/pytorch_lightning/*" - "tests/tests_pytorch/**" - "pyproject.toml" # includes pytest config - "!requirements/docs.txt" @@ -130,7 +134,8 @@ subprojects: - id: "pytorch_lightning: Docs" paths: - - "src/pytorch_lightning/**" + - "src/lightning/pytorch/**" + - "src/pytorch_lightning/*" - "docs/source-pytorch/**" - ".actions/**" - ".github/workflows/docs-checks.yml" diff --git a/.github/labeler.yml b/.github/labeler.yml index 721965bf1a..953db13adc 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -9,7 +9,8 @@ app: - 'requirements/app/**' pl: - - 'src/pytorch_lightning/**' + - "src/lightning/pytorch/**" + - "src/pytorch_lightning/*" - 'tests/tests_pytorch/**' - 'tests/legacy/**' - 'examples/pl_*/**' diff --git a/.github/workflows/ci-tests-pytorch.yml b/.github/workflows/ci-tests-pytorch.yml index 3c92185f0d..acbb4142f7 100644 --- a/.github/workflows/ci-tests-pytorch.yml +++ b/.github/workflows/ci-tests-pytorch.yml @@ -10,7 +10,8 @@ on: paths: - ".actions/**" - "requirements/pytorch/**" - - "src/pytorch_lightning/**" + - "src/lightning/pytorch/**" + - 
"src/pytorch_lightning/*" - "tests/tests_pytorch/**" - "tests/legacy/**" - "pyproject.toml" # includes pytest config diff --git a/.github/workflows/tpu-tests.yml b/.github/workflows/tpu-tests.yml index 74b90ba2f9..cf93e562c6 100644 --- a/.github/workflows/tpu-tests.yml +++ b/.github/workflows/tpu-tests.yml @@ -15,7 +15,8 @@ on: - "src/lightning_fabric/*" - "tests/tests_fabric/**" - "requirements/pytorch/**" - - "src/pytorch_lightning/**" + - "src/lightning/pytorch/**" + - "src/pytorch_lightning/*" - "tests/tests_pytorch/**" - "pyproject.toml" # includes pytest config - "!requirements/*/docs.txt" diff --git a/.gitignore b/.gitignore index 10042faf2b..f0b63229c4 100644 --- a/.gitignore +++ b/.gitignore @@ -57,7 +57,10 @@ src/lightning_fabric/ !src/lightning_fabric/__*__.py !src/lightning_fabric/MANIFEST.in !src/lightning_fabric/README.md -src/lightning/pytorch/ +src/pytorch_lightning/ +!src/pytorch_lightning/__*__.py +!src/pytorch_lightning/MANIFEST.in +!src/pytorch_lightning/README.md # PyInstaller # Usually these files are written by a python script from a template diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0deea67803..18aa7ec740 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,9 +42,8 @@ repos: docs/source-pytorch/_static/images/general/pl_quick_start_full_compressed.gif| docs/source-pytorch/_static/images/general/pl_overview_flat.jpg| docs/source-pytorch/_static/images/general/pl_overview.gif| - src/lightning_app/cli/pl-app-template/ui/yarn.lock| - src/pytorch_lightning/CHANGELOG.md| - src/lightning/fabric/CHANGELOG.md + src/lightning/fabric/CHANGELOG.md| + src/lightning/pytorch/CHANGELOG.md )$ - id: detect-private-key @@ -100,7 +99,7 @@ repos: (?x)^( src/lightning/app/CHANGELOG.md| src/lightning/fabric/CHANGELOG.md| - src/pytorch_lightning/CHANGELOG.md + src/lightning/pytorch/CHANGELOG.md )$ - repo: https://github.com/charliermarsh/ruff-pre-commit diff --git a/setup.py b/setup.py index 14bb33fd92..a38be7177f 100755 --- a/setup.py +++ b/setup.py @@ -58,7 +58,7 @@ _PACKAGE_MAPPING = { "fabric": "lightning_fabric", } # TODO: drop this reverse list when all packages are moved -_MIRROR_PACKAGE_REVERSED = ("app", "fabric") +_MIRROR_PACKAGE_REVERSED = ("app", "fabric", "pytorch") # https://packaging.python.org/guides/single-sourcing-package-version/ # http://blog.ionelmc.ro/2014/05/25/python-packaging/ _PATH_ROOT = os.path.dirname(__file__) @@ -142,7 +142,7 @@ if __name__ == "__main__": package_to_install = _PACKAGE_MAPPING.get(_PACKAGE_NAME, "lightning") if package_to_install == "lightning": # merge all requirements files - assistant._load_aggregate_requirements(_PATH_REQUIRE, _FREEZE_REQUIREMENTS) # install everything + assistant._load_aggregate_requirements(_PATH_REQUIRE, _FREEZE_REQUIREMENTS) # replace imports and copy the code assistant.create_mirror_package(_PATH_SRC, _PACKAGE_MAPPING, reverse=_MIRROR_PACKAGE_REVERSED) else: diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/lightning/pytorch/CHANGELOG.md similarity index 100% rename from src/pytorch_lightning/CHANGELOG.md rename to src/lightning/pytorch/CHANGELOG.md index 115d47f266..c4ea1c96da 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/lightning/pytorch/CHANGELOG.md @@ -207,6 +207,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [1.9.0] - 2023-01-17 ### Added + - Added support for native logging of `MetricCollection` with enabled compute groups ([#15580](https://github.com/Lightning-AI/lightning/pull/15580)) - Added support for custom artifact names in `pl.loggers.WandbLogger` ([#16173](https://github.com/Lightning-AI/lightning/pull/16173)) - Added support for DDP with `LRFinder` ([#15304](https://github.com/Lightning-AI/lightning/pull/15304)) @@ -223,7 +224,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added info message for Ampere CUDA GPU users to enable tf32 matmul precision ([#16037](https://github.com/Lightning-AI/lightning/pull/16037)) - Added support for returning optimizer-like classes in `LightningModule.configure_optimizers` ([#16189](https://github.com/Lightning-AI/lightning/pull/16189)) - ### Changed - Drop PyTorch 1.9 support ([#15347](https://github.com/Lightning-AI/lightning/pull/15347)) diff --git a/src/pytorch_lightning/__init__.py b/src/lightning/pytorch/__init__.py similarity index 73% rename from src/pytorch_lightning/__init__.py rename to src/lightning/pytorch/__init__.py index 9e5aeaa03c..51db8cddcc 100644 --- a/src/pytorch_lightning/__init__.py +++ b/src/lightning/pytorch/__init__.py @@ -1,12 +1,18 @@ """Root package info.""" import logging +import os from typing import Any -from pytorch_lightning.__about__ import * # noqa: F401, F403 +from lightning_utilities import module_available +if os.path.isfile(os.path.join(os.path.dirname(__file__), "__about__.py")): + from lightning.pytorch.__about__ import * # noqa: F401, F403 if "__version__" not in locals(): - from pytorch_lightning.__version__ import version as __version__ # noqa: F401 + if os.path.isfile(os.path.join(os.path.dirname(__file__), "__version__.py")): + from lightning.pytorch.__version__ import version as __version__ + elif module_available("lightning"): + from lightning import __version__ # noqa: F401 _DETAIL = 15 # between logging.INFO and logging.DEBUG, used for logging in production use cases @@ -30,13 +36,13 @@ if not _root_logger.hasHandlers(): _logger.addHandler(logging.StreamHandler()) _logger.propagate = False -from lightning_fabric.utilities.seed import seed_everything # noqa: E402 -from pytorch_lightning.callbacks import Callback # noqa: E402 -from pytorch_lightning.core import LightningDataModule, LightningModule # noqa: E402 -from pytorch_lightning.trainer import Trainer # noqa: E402 +from lightning.fabric.utilities.seed import seed_everything # noqa: E402 +from lightning.pytorch.callbacks import Callback # noqa: E402 +from lightning.pytorch.core import LightningDataModule, LightningModule # noqa: E402 +from lightning.pytorch.trainer import Trainer # noqa: E402 # this import needs to go last as it will patch other modules -import pytorch_lightning._graveyard # noqa: E402, F401 # isort: skip +import lightning.pytorch._graveyard # noqa: E402, F401 # isort: skip __all__ = ["Trainer", "LightningDataModule", "LightningModule", "Callback", "seed_everything"] diff --git a/src/pytorch_lightning/_graveyard/__init__.py b/src/lightning/pytorch/_graveyard/__init__.py similarity index 90% rename from src/pytorch_lightning/_graveyard/__init__.py rename to src/lightning/pytorch/_graveyard/__init__.py index fe18cce4e2..aa5a132574 100644 --- a/src/pytorch_lightning/_graveyard/__init__.py +++ b/src/lightning/pytorch/_graveyard/__init__.py @@ -11,4 +11,4 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -import pytorch_lightning._graveyard.legacy_import_unpickler # noqa: F401 +import lightning.pytorch._graveyard.legacy_import_unpickler # noqa: F401 diff --git a/src/pytorch_lightning/_graveyard/legacy_import_unpickler.py b/src/lightning/pytorch/_graveyard/legacy_import_unpickler.py similarity index 94% rename from src/pytorch_lightning/_graveyard/legacy_import_unpickler.py rename to src/lightning/pytorch/_graveyard/legacy_import_unpickler.py index a142c5369d..94d33e99d0 100644 --- a/src/pytorch_lightning/_graveyard/legacy_import_unpickler.py +++ b/src/lightning/pytorch/_graveyard/legacy_import_unpickler.py @@ -11,7 +11,7 @@ def _patch_pl_to_mirror_if_necessary(module: str) -> str: if module.startswith(pl): # for the standalone package this won't do anything, # for the unified mirror package it will redirect the imports - module = "pytorch_lightning" + module[len(pl) :] + module = "lightning.pytorch" + module[len(pl) :] return module @@ -29,7 +29,7 @@ def compare_version(package: str, op: Callable, version: str, use_base_version: return _compare_version(new_package, op, version, use_base_version) -# patching is necessary, since up to v.0.7.3 torchmetrics has a hardcoded reference to pytorch_lightning, +# patching is necessary, since up to v.0.7.3 torchmetrics has a hardcoded reference to lightning.pytorch, # which has to be redirected to the unified package: # https://github.com/Lightning-AI/metrics/blob/v0.7.3/torchmetrics/metric.py#L96 try: diff --git a/src/pytorch_lightning/accelerators/__init__.py b/src/lightning/pytorch/accelerators/__init__.py similarity index 56% rename from src/pytorch_lightning/accelerators/__init__.py rename to src/lightning/pytorch/accelerators/__init__.py index 2d26b0a3bb..e8ab8b1664 100644 --- a/src/pytorch_lightning/accelerators/__init__.py +++ b/src/lightning/pytorch/accelerators/__init__.py @@ -10,16 +10,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from lightning_fabric.accelerators import find_usable_cuda_devices # noqa: F401 -from lightning_fabric.accelerators.registry import _AcceleratorRegistry, call_register_accelerators -from pytorch_lightning.accelerators.accelerator import Accelerator # noqa: F401 -from pytorch_lightning.accelerators.cpu import CPUAccelerator # noqa: F401 -from pytorch_lightning.accelerators.cuda import CUDAAccelerator # noqa: F401 -from pytorch_lightning.accelerators.hpu import HPUAccelerator # noqa: F401 -from pytorch_lightning.accelerators.ipu import IPUAccelerator # noqa: F401 -from pytorch_lightning.accelerators.mps import MPSAccelerator # noqa: F401 -from pytorch_lightning.accelerators.tpu import TPUAccelerator # noqa: F401 +from lightning.fabric.accelerators import find_usable_cuda_devices # noqa: F401 +from lightning.fabric.accelerators.registry import _AcceleratorRegistry, call_register_accelerators +from lightning.pytorch.accelerators.accelerator import Accelerator # noqa: F401 +from lightning.pytorch.accelerators.cpu import CPUAccelerator # noqa: F401 +from lightning.pytorch.accelerators.cuda import CUDAAccelerator # noqa: F401 +from lightning.pytorch.accelerators.hpu import HPUAccelerator # noqa: F401 +from lightning.pytorch.accelerators.ipu import IPUAccelerator # noqa: F401 +from lightning.pytorch.accelerators.mps import MPSAccelerator # noqa: F401 +from lightning.pytorch.accelerators.tpu import TPUAccelerator # noqa: F401 -ACCELERATORS_BASE_MODULE = "pytorch_lightning.accelerators" +ACCELERATORS_BASE_MODULE = "lightning.pytorch.accelerators" AcceleratorRegistry = _AcceleratorRegistry() call_register_accelerators(AcceleratorRegistry, ACCELERATORS_BASE_MODULE) diff --git a/src/pytorch_lightning/accelerators/accelerator.py b/src/lightning/pytorch/accelerators/accelerator.py similarity index 89% rename from src/pytorch_lightning/accelerators/accelerator.py rename to src/lightning/pytorch/accelerators/accelerator.py index f0f98757cf..cc9ddbc6cb 100644 --- a/src/pytorch_lightning/accelerators/accelerator.py +++ b/src/lightning/pytorch/accelerators/accelerator.py @@ -14,9 +14,9 @@ from abc import ABC from typing import Any, Dict -import pytorch_lightning as pl -from lightning_fabric.accelerators.accelerator import Accelerator as _Accelerator -from lightning_fabric.utilities.types import _DEVICE +import lightning.pytorch as pl +from lightning.fabric.accelerators.accelerator import Accelerator as _Accelerator +from lightning.fabric.utilities.types import _DEVICE class Accelerator(_Accelerator, ABC): diff --git a/src/pytorch_lightning/accelerators/cpu.py b/src/lightning/pytorch/accelerators/cpu.py similarity index 89% rename from src/pytorch_lightning/accelerators/cpu.py rename to src/lightning/pytorch/accelerators/cpu.py index efca9986e3..7510ae0e2f 100644 --- a/src/pytorch_lightning/accelerators/cpu.py +++ b/src/lightning/pytorch/accelerators/cpu.py @@ -15,11 +15,11 @@ from typing import Any, Dict, List, Union import torch -from lightning_fabric.accelerators.cpu import _parse_cpu_cores -from lightning_fabric.utilities.types import _DEVICE -from pytorch_lightning.accelerators.accelerator import Accelerator -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.imports import _PSUTIL_AVAILABLE +from lightning.fabric.accelerators.cpu import _parse_cpu_cores +from lightning.fabric.utilities.types import _DEVICE +from lightning.pytorch.accelerators.accelerator import Accelerator +from lightning.pytorch.utilities.exceptions import 
MisconfigurationException +from lightning.pytorch.utilities.imports import _PSUTIL_AVAILABLE class CPUAccelerator(Accelerator): diff --git a/src/pytorch_lightning/accelerators/cuda.py b/src/lightning/pytorch/accelerators/cuda.py similarity index 93% rename from src/pytorch_lightning/accelerators/cuda.py rename to src/lightning/pytorch/accelerators/cuda.py index eead514082..c5224c013a 100644 --- a/src/pytorch_lightning/accelerators/cuda.py +++ b/src/lightning/pytorch/accelerators/cuda.py @@ -19,12 +19,12 @@ from typing import Any, Dict, List, Optional, Union import torch -import pytorch_lightning as pl -from lightning_fabric.accelerators.cuda import _check_cuda_matmul_precision, num_cuda_devices -from lightning_fabric.utilities.device_parser import _parse_gpu_ids -from lightning_fabric.utilities.types import _DEVICE -from pytorch_lightning.accelerators.accelerator import Accelerator -from pytorch_lightning.utilities.exceptions import MisconfigurationException +import lightning.pytorch as pl +from lightning.fabric.accelerators.cuda import _check_cuda_matmul_precision, num_cuda_devices +from lightning.fabric.utilities.device_parser import _parse_gpu_ids +from lightning.fabric.utilities.types import _DEVICE +from lightning.pytorch.accelerators.accelerator import Accelerator +from lightning.pytorch.utilities.exceptions import MisconfigurationException _log = logging.getLogger(__name__) diff --git a/src/pytorch_lightning/accelerators/hpu.py b/src/lightning/pytorch/accelerators/hpu.py similarity index 91% rename from src/pytorch_lightning/accelerators/hpu.py rename to src/lightning/pytorch/accelerators/hpu.py index d0e89dd0aa..4fb8ca0441 100644 --- a/src/pytorch_lightning/accelerators/hpu.py +++ b/src/lightning/pytorch/accelerators/hpu.py @@ -16,11 +16,11 @@ from typing import Any, Dict, List, Optional, Union import torch -from lightning_fabric.utilities.types import _DEVICE -from pytorch_lightning.accelerators.accelerator import Accelerator -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.imports import _HPU_AVAILABLE -from pytorch_lightning.utilities.rank_zero import rank_zero_debug +from lightning.fabric.utilities.types import _DEVICE +from lightning.pytorch.accelerators.accelerator import Accelerator +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.imports import _HPU_AVAILABLE +from lightning.pytorch.utilities.rank_zero import rank_zero_debug if _HPU_AVAILABLE: import habana_frameworks.torch.hpu as torch_hpu @@ -108,7 +108,7 @@ class HPUAccelerator(Accelerator): def _parse_hpus(devices: Optional[Union[int, str, List[int]]]) -> Optional[int]: """ Parses the hpus given in the format as accepted by the - :class:`~pytorch_lightning.trainer.Trainer` for the `devices` flag. + :class:`~lightning.pytorch.trainer.Trainer` for the `devices` flag. 
Args: devices: An integer that indicates the number of Gaudi devices to be used diff --git a/src/pytorch_lightning/accelerators/ipu.py b/src/lightning/pytorch/accelerators/ipu.py similarity index 91% rename from src/pytorch_lightning/accelerators/ipu.py rename to src/lightning/pytorch/accelerators/ipu.py index 557fe9a53c..2e84fcc450 100644 --- a/src/pytorch_lightning/accelerators/ipu.py +++ b/src/lightning/pytorch/accelerators/ipu.py @@ -15,9 +15,9 @@ from typing import Any, Dict, List import torch -from lightning_fabric.utilities.types import _DEVICE -from pytorch_lightning.accelerators.accelerator import Accelerator -from pytorch_lightning.utilities.imports import _IPU_AVAILABLE +from lightning.fabric.utilities.types import _DEVICE +from lightning.pytorch.accelerators.accelerator import Accelerator +from lightning.pytorch.utilities.imports import _IPU_AVAILABLE class IPUAccelerator(Accelerator): diff --git a/src/pytorch_lightning/accelerators/mps.py b/src/lightning/pytorch/accelerators/mps.py similarity index 88% rename from src/pytorch_lightning/accelerators/mps.py rename to src/lightning/pytorch/accelerators/mps.py index 8530821064..c309a61d0f 100644 --- a/src/pytorch_lightning/accelerators/mps.py +++ b/src/lightning/pytorch/accelerators/mps.py @@ -15,12 +15,12 @@ from typing import Any, Dict, List, Optional, Union import torch -from lightning_fabric.accelerators.mps import MPSAccelerator as _MPSAccelerator -from lightning_fabric.utilities.device_parser import _parse_gpu_ids -from lightning_fabric.utilities.types import _DEVICE -from pytorch_lightning.accelerators.accelerator import Accelerator -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.imports import _PSUTIL_AVAILABLE +from lightning.fabric.accelerators.mps import MPSAccelerator as _MPSAccelerator +from lightning.fabric.utilities.device_parser import _parse_gpu_ids +from lightning.fabric.utilities.types import _DEVICE +from lightning.pytorch.accelerators.accelerator import Accelerator +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.imports import _PSUTIL_AVAILABLE class MPSAccelerator(Accelerator): diff --git a/src/pytorch_lightning/accelerators/tpu.py b/src/lightning/pytorch/accelerators/tpu.py similarity index 91% rename from src/pytorch_lightning/accelerators/tpu.py rename to src/lightning/pytorch/accelerators/tpu.py index 8aac285cea..74c3c88692 100644 --- a/src/pytorch_lightning/accelerators/tpu.py +++ b/src/lightning/pytorch/accelerators/tpu.py @@ -15,10 +15,10 @@ from typing import Any, Dict, List, Optional, Union import torch -from lightning_fabric.accelerators.tpu import _parse_tpu_devices, _XLA_AVAILABLE -from lightning_fabric.accelerators.tpu import TPUAccelerator as FabricTPUAccelerator -from lightning_fabric.utilities.types import _DEVICE -from pytorch_lightning.accelerators.accelerator import Accelerator +from lightning.fabric.accelerators.tpu import _parse_tpu_devices, _XLA_AVAILABLE +from lightning.fabric.accelerators.tpu import TPUAccelerator as FabricTPUAccelerator +from lightning.fabric.utilities.types import _DEVICE +from lightning.pytorch.accelerators.accelerator import Accelerator class TPUAccelerator(Accelerator): diff --git a/src/pytorch_lightning/callbacks/__init__.py b/src/lightning/pytorch/callbacks/__init__.py similarity index 52% rename from src/pytorch_lightning/callbacks/__init__.py rename to src/lightning/pytorch/callbacks/__init__.py index 2d381d30db..ad0c1a95f7 100644 --- 
a/src/pytorch_lightning/callbacks/__init__.py +++ b/src/lightning/pytorch/callbacks/__init__.py @@ -11,26 +11,26 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from pytorch_lightning.callbacks.batch_size_finder import BatchSizeFinder -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.callbacks.checkpoint import Checkpoint -from pytorch_lightning.callbacks.device_stats_monitor import DeviceStatsMonitor -from pytorch_lightning.callbacks.early_stopping import EarlyStopping -from pytorch_lightning.callbacks.finetuning import BackboneFinetuning, BaseFinetuning -from pytorch_lightning.callbacks.gradient_accumulation_scheduler import GradientAccumulationScheduler -from pytorch_lightning.callbacks.lambda_function import LambdaCallback -from pytorch_lightning.callbacks.lr_finder import LearningRateFinder -from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor -from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint -from pytorch_lightning.callbacks.model_summary import ModelSummary -from pytorch_lightning.callbacks.on_exception_checkpoint import OnExceptionCheckpoint -from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter -from pytorch_lightning.callbacks.progress import ProgressBarBase, RichProgressBar, TQDMProgressBar -from pytorch_lightning.callbacks.pruning import ModelPruning -from pytorch_lightning.callbacks.quantization import QuantizationAwareTraining -from pytorch_lightning.callbacks.rich_model_summary import RichModelSummary -from pytorch_lightning.callbacks.stochastic_weight_avg import StochasticWeightAveraging -from pytorch_lightning.callbacks.timer import Timer +from lightning.pytorch.callbacks.batch_size_finder import BatchSizeFinder +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.callbacks.checkpoint import Checkpoint +from lightning.pytorch.callbacks.device_stats_monitor import DeviceStatsMonitor +from lightning.pytorch.callbacks.early_stopping import EarlyStopping +from lightning.pytorch.callbacks.finetuning import BackboneFinetuning, BaseFinetuning +from lightning.pytorch.callbacks.gradient_accumulation_scheduler import GradientAccumulationScheduler +from lightning.pytorch.callbacks.lambda_function import LambdaCallback +from lightning.pytorch.callbacks.lr_finder import LearningRateFinder +from lightning.pytorch.callbacks.lr_monitor import LearningRateMonitor +from lightning.pytorch.callbacks.model_checkpoint import ModelCheckpoint +from lightning.pytorch.callbacks.model_summary import ModelSummary +from lightning.pytorch.callbacks.on_exception_checkpoint import OnExceptionCheckpoint +from lightning.pytorch.callbacks.prediction_writer import BasePredictionWriter +from lightning.pytorch.callbacks.progress import ProgressBarBase, RichProgressBar, TQDMProgressBar +from lightning.pytorch.callbacks.pruning import ModelPruning +from lightning.pytorch.callbacks.quantization import QuantizationAwareTraining +from lightning.pytorch.callbacks.rich_model_summary import RichModelSummary +from lightning.pytorch.callbacks.stochastic_weight_avg import StochasticWeightAveraging +from lightning.pytorch.callbacks.timer import Timer __all__ = [ "BackboneFinetuning", diff --git a/src/pytorch_lightning/callbacks/batch_size_finder.py b/src/lightning/pytorch/callbacks/batch_size_finder.py similarity index 94% rename from 
src/pytorch_lightning/callbacks/batch_size_finder.py rename to src/lightning/pytorch/callbacks/batch_size_finder.py index 002a2a498e..027c93146a 100644 --- a/src/pytorch_lightning/callbacks/batch_size_finder.py +++ b/src/lightning/pytorch/callbacks/batch_size_finder.py @@ -20,12 +20,12 @@ Finds optimal batch size from typing import Optional -import pytorch_lightning as pl -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.tuner.batch_size_scaling import _scale_batch_size -from pytorch_lightning.utilities.exceptions import _TunerExitException, MisconfigurationException -from pytorch_lightning.utilities.parsing import lightning_hasattr -from pytorch_lightning.utilities.rank_zero import rank_zero_warn +import lightning.pytorch as pl +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.tuner.batch_size_scaling import _scale_batch_size +from lightning.pytorch.utilities.exceptions import _TunerExitException, MisconfigurationException +from lightning.pytorch.utilities.parsing import lightning_hasattr +from lightning.pytorch.utilities.rank_zero import rank_zero_warn class BatchSizeFinder(Callback): @@ -63,7 +63,7 @@ class BatchSizeFinder(Callback): # 1. Customize the BatchSizeFinder callback to run at different epochs. This feature is # useful while fine-tuning models since you can't always use the same batch size after # unfreezing the backbone. - from pytorch_lightning.callbacks import BatchSizeFinder + from lightning.pytorch.callbacks import BatchSizeFinder class FineTuneBatchSizeFinder(BatchSizeFinder): @@ -85,7 +85,7 @@ class BatchSizeFinder(Callback): Example:: # 2. Run batch size finder for validate/test/predict. - from pytorch_lightning.callbacks import BatchSizeFinder + from lightning.pytorch.callbacks import BatchSizeFinder class EvalBatchSizeFinder(BatchSizeFinder): diff --git a/src/pytorch_lightning/callbacks/callback.py b/src/lightning/pytorch/callbacks/callback.py similarity index 96% rename from src/pytorch_lightning/callbacks/callback.py rename to src/lightning/pytorch/callbacks/callback.py index dda36f1280..b3e4cf0adc 100644 --- a/src/pytorch_lightning/callbacks/callback.py +++ b/src/lightning/pytorch/callbacks/callback.py @@ -21,8 +21,8 @@ from typing import Any, Dict, List, Optional, Type from torch import Tensor from torch.optim import Optimizer -import pytorch_lightning as pl -from pytorch_lightning.utilities.types import STEP_OUTPUT +import lightning.pytorch as pl +from lightning.pytorch.utilities.types import STEP_OUTPUT class Callback: @@ -217,8 +217,8 @@ class Callback: Called when saving a checkpoint to give you a chance to store anything else you might want to save. Args: - trainer: the current :class:`~pytorch_lightning.trainer.Trainer` instance. - pl_module: the current :class:`~pytorch_lightning.core.module.LightningModule` instance. + trainer: the current :class:`~lightning.pytorch.trainer.Trainer` instance. + pl_module: the current :class:`~lightning.pytorch.core.module.LightningModule` instance. checkpoint: the checkpoint dictionary that will be saved. """ @@ -229,8 +229,8 @@ class Callback: Called when loading a model checkpoint, use to reload state. Args: - trainer: the current :class:`~pytorch_lightning.trainer.Trainer` instance. - pl_module: the current :class:`~pytorch_lightning.core.module.LightningModule` instance. + trainer: the current :class:`~lightning.pytorch.trainer.Trainer` instance. + pl_module: the current :class:`~lightning.pytorch.core.module.LightningModule` instance. 
checkpoint: the full checkpoint dictionary that got loaded by the Trainer. """ diff --git a/src/pytorch_lightning/callbacks/checkpoint.py b/src/lightning/pytorch/callbacks/checkpoint.py similarity index 66% rename from src/pytorch_lightning/callbacks/checkpoint.py rename to src/lightning/pytorch/callbacks/checkpoint.py index 405f29876c..2dd5917601 100644 --- a/src/pytorch_lightning/callbacks/checkpoint.py +++ b/src/lightning/pytorch/callbacks/checkpoint.py @@ -1,9 +1,9 @@ -from pytorch_lightning.callbacks.callback import Callback +from lightning.pytorch.callbacks.callback import Callback class Checkpoint(Callback): r""" This is the base class for model checkpointing. Expert users may want to subclass it in case of writing - custom :class:`~pytorch_lightning.callbacksCheckpoint` callback, so that + custom :class:`~lightning.pytorch.callbacksCheckpoint` callback, so that the trainer recognizes the custom class as a checkpointing callback. """ diff --git a/src/pytorch_lightning/callbacks/device_stats_monitor.py b/src/lightning/pytorch/callbacks/device_stats_monitor.py similarity index 91% rename from src/pytorch_lightning/callbacks/device_stats_monitor.py rename to src/lightning/pytorch/callbacks/device_stats_monitor.py index 1841ad8bda..d1f6dec194 100644 --- a/src/pytorch_lightning/callbacks/device_stats_monitor.py +++ b/src/lightning/pytorch/callbacks/device_stats_monitor.py @@ -20,12 +20,12 @@ Monitors and logs device stats during training. """ from typing import Any, Dict, Optional -import pytorch_lightning as pl -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.imports import _PSUTIL_AVAILABLE -from pytorch_lightning.utilities.rank_zero import rank_zero_warn -from pytorch_lightning.utilities.types import STEP_OUTPUT +import lightning.pytorch as pl +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.imports import _PSUTIL_AVAILABLE +from lightning.pytorch.utilities.rank_zero import rank_zero_warn +from lightning.pytorch.utilities.types import STEP_OUTPUT class DeviceStatsMonitor(Callback): @@ -46,8 +46,8 @@ class DeviceStatsMonitor(Callback): If ``Trainer`` has no logger. 
Example: - >>> from pytorch_lightning import Trainer - >>> from pytorch_lightning.callbacks import DeviceStatsMonitor + >>> from lightning.pytorch import Trainer + >>> from lightning.pytorch.callbacks import DeviceStatsMonitor >>> device_stats = DeviceStatsMonitor() # doctest: +SKIP >>> trainer = Trainer(callbacks=[device_stats]) # doctest: +SKIP """ @@ -91,7 +91,7 @@ class DeviceStatsMonitor(Callback): if self._cpu_stats and device.type != "cpu": # Don't query CPU stats twice if CPU is accelerator - from pytorch_lightning.accelerators.cpu import get_cpu_stats + from lightning.pytorch.accelerators.cpu import get_cpu_stats device_stats.update(get_cpu_stats()) diff --git a/src/pytorch_lightning/callbacks/early_stopping.py b/src/lightning/pytorch/callbacks/early_stopping.py similarity index 96% rename from src/pytorch_lightning/callbacks/early_stopping.py rename to src/lightning/pytorch/callbacks/early_stopping.py index e42b587052..041a6c43e1 100644 --- a/src/pytorch_lightning/callbacks/early_stopping.py +++ b/src/lightning/pytorch/callbacks/early_stopping.py @@ -25,11 +25,11 @@ import numpy as np import torch from torch import Tensor -import pytorch_lightning as pl -from lightning_fabric.utilities.rank_zero import _get_rank -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_prefixed_message, rank_zero_warn +import lightning.pytorch as pl +from lightning.fabric.utilities.rank_zero import _get_rank +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.rank_zero import rank_prefixed_message, rank_zero_warn log = logging.getLogger(__name__) @@ -74,8 +74,8 @@ class EarlyStopping(Callback): Example:: - >>> from pytorch_lightning import Trainer - >>> from pytorch_lightning.callbacks import EarlyStopping + >>> from lightning.pytorch import Trainer + >>> from lightning.pytorch.callbacks import EarlyStopping >>> early_stopping = EarlyStopping('val_loss') >>> trainer = Trainer(callbacks=[early_stopping]) @@ -174,7 +174,7 @@ class EarlyStopping(Callback): self.patience = state_dict["patience"] def _should_skip_check(self, trainer: "pl.Trainer") -> bool: - from pytorch_lightning.trainer.states import TrainerFn + from lightning.pytorch.trainer.states import TrainerFn return trainer.state.fn != TrainerFn.FITTING or trainer.sanity_checking diff --git a/src/pytorch_lightning/callbacks/finetuning.py b/src/lightning/pytorch/callbacks/finetuning.py similarity index 98% rename from src/pytorch_lightning/callbacks/finetuning.py rename to src/lightning/pytorch/callbacks/finetuning.py index 72f70c3b30..c4a1fea0c8 100644 --- a/src/pytorch_lightning/callbacks/finetuning.py +++ b/src/lightning/pytorch/callbacks/finetuning.py @@ -24,10 +24,10 @@ from torch.nn import Module, ModuleDict from torch.nn.modules.batchnorm import _BatchNorm from torch.optim.optimizer import Optimizer -import pytorch_lightning as pl -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_zero_warn +import lightning.pytorch as pl +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.rank_zero import rank_zero_warn log = 
logging.getLogger(__name__) @@ -328,8 +328,8 @@ class BackboneFinetuning(BaseFinetuning): Example:: - >>> from pytorch_lightning import Trainer - >>> from pytorch_lightning.callbacks import BackboneFinetuning + >>> from lightning.pytorch import Trainer + >>> from lightning.pytorch.callbacks import BackboneFinetuning >>> multiplicative = lambda epoch: 1.5 >>> backbone_finetuning = BackboneFinetuning(200, multiplicative) >>> trainer = Trainer(callbacks=[backbone_finetuning]) diff --git a/src/pytorch_lightning/callbacks/gradient_accumulation_scheduler.py b/src/lightning/pytorch/callbacks/gradient_accumulation_scheduler.py similarity index 93% rename from src/pytorch_lightning/callbacks/gradient_accumulation_scheduler.py rename to src/lightning/pytorch/callbacks/gradient_accumulation_scheduler.py index a21f90edd4..c92c9d58c9 100644 --- a/src/pytorch_lightning/callbacks/gradient_accumulation_scheduler.py +++ b/src/lightning/pytorch/callbacks/gradient_accumulation_scheduler.py @@ -22,9 +22,9 @@ Trainer also calls ``optimizer.step()`` for the last indivisible step number. from typing import Any, Dict -import pytorch_lightning as pl -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.utilities.exceptions import MisconfigurationException +import lightning.pytorch as pl +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.utilities.exceptions import MisconfigurationException class GradientAccumulationScheduler(Callback): @@ -51,8 +51,8 @@ class GradientAccumulationScheduler(Callback): Example:: - >>> from pytorch_lightning import Trainer - >>> from pytorch_lightning.callbacks import GradientAccumulationScheduler + >>> from lightning.pytorch import Trainer + >>> from lightning.pytorch.callbacks import GradientAccumulationScheduler # from epoch 5, it starts accumulating every 2 batches. Here we have 4 instead of 5 # because epoch (key) should be zero-indexed. diff --git a/src/pytorch_lightning/callbacks/lambda_function.py b/src/lightning/pytorch/callbacks/lambda_function.py similarity index 93% rename from src/pytorch_lightning/callbacks/lambda_function.py rename to src/lightning/pytorch/callbacks/lambda_function.py index c56f0dd779..d28a523026 100644 --- a/src/pytorch_lightning/callbacks/lambda_function.py +++ b/src/lightning/pytorch/callbacks/lambda_function.py @@ -21,7 +21,7 @@ Create a simple callback on the fly using lambda functions. from typing import Callable, Optional -from pytorch_lightning.callbacks.callback import Callback +from lightning.pytorch.callbacks.callback import Callback class LambdaCallback(Callback): @@ -29,12 +29,12 @@ class LambdaCallback(Callback): Create a simple callback on the fly using lambda functions. 
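The ``GradientAccumulationScheduler`` comment above stresses that the scheduling keys are zero-indexed epochs. A short sketch with the new import path; the ``scheduling`` mapping here simply restates that comment::

    from lightning.pytorch import Trainer
    from lightning.pytorch.callbacks import GradientAccumulationScheduler

    # Key 4 means "starting from the fifth epoch", because epochs are zero-indexed;
    # from then on, gradients are accumulated over every 2 batches.
    accumulator = GradientAccumulationScheduler(scheduling={4: 2})
    trainer = Trainer(callbacks=[accumulator])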
Args: - **kwargs: hooks supported by :class:`~pytorch_lightning.callbacks.callback.Callback` + **kwargs: hooks supported by :class:`~lightning.pytorch.callbacks.callback.Callback` Example:: - >>> from pytorch_lightning import Trainer - >>> from pytorch_lightning.callbacks import LambdaCallback + >>> from lightning.pytorch import Trainer + >>> from lightning.pytorch.callbacks import LambdaCallback >>> trainer = Trainer(callbacks=[LambdaCallback(setup=lambda *args: print('setup'))]) """ diff --git a/src/pytorch_lightning/callbacks/lr_finder.py b/src/lightning/pytorch/callbacks/lr_finder.py similarity index 92% rename from src/pytorch_lightning/callbacks/lr_finder.py rename to src/lightning/pytorch/callbacks/lr_finder.py index 8a60c87de5..26a1452391 100644 --- a/src/pytorch_lightning/callbacks/lr_finder.py +++ b/src/lightning/pytorch/callbacks/lr_finder.py @@ -19,11 +19,11 @@ Finds optimal learning rate """ from typing import Optional -import pytorch_lightning as pl -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.tuner.lr_finder import _lr_find, _LRFinder -from pytorch_lightning.utilities.exceptions import _TunerExitException -from pytorch_lightning.utilities.seed import isolate_rng +import lightning.pytorch as pl +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.tuner.lr_finder import _lr_find, _LRFinder +from lightning.pytorch.utilities.exceptions import _TunerExitException +from lightning.pytorch.utilities.seed import isolate_rng class LearningRateFinder(Callback): @@ -50,7 +50,7 @@ class LearningRateFinder(Callback): # Customize LearningRateFinder callback to run at different epochs. # This feature is useful while fine-tuning models. - from pytorch_lightning.callbacks import LearningRateFinder + from lightning.pytorch.callbacks import LearningRateFinder class FineTuneLearningRateFinder(LearningRateFinder): diff --git a/src/pytorch_lightning/callbacks/lr_monitor.py b/src/lightning/pytorch/callbacks/lr_monitor.py similarity index 97% rename from src/pytorch_lightning/callbacks/lr_monitor.py rename to src/lightning/pytorch/callbacks/lr_monitor.py index fd300fd076..afec2b7f1e 100644 --- a/src/pytorch_lightning/callbacks/lr_monitor.py +++ b/src/lightning/pytorch/callbacks/lr_monitor.py @@ -25,11 +25,11 @@ from typing import Any, DefaultDict, Dict, List, Optional, Set, Tuple, Type from torch.optim.optimizer import Optimizer -import pytorch_lightning as pl -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_zero_warn -from pytorch_lightning.utilities.types import LRSchedulerConfig +import lightning.pytorch as pl +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.rank_zero import rank_zero_warn +from lightning.pytorch.utilities.types import LRSchedulerConfig class LearningRateMonitor(Callback): @@ -49,8 +49,8 @@ class LearningRateMonitor(Callback): Example:: - >>> from pytorch_lightning import Trainer - >>> from pytorch_lightning.callbacks import LearningRateMonitor + >>> from lightning.pytorch import Trainer + >>> from lightning.pytorch.callbacks import LearningRateMonitor >>> lr_monitor = LearningRateMonitor(logging_interval='step') >>> trainer = Trainer(callbacks=[lr_monitor]) diff --git a/src/pytorch_lightning/callbacks/model_checkpoint.py 
b/src/lightning/pytorch/callbacks/model_checkpoint.py similarity index 97% rename from src/pytorch_lightning/callbacks/model_checkpoint.py rename to src/lightning/pytorch/callbacks/model_checkpoint.py index 447b5c8322..d252183e69 100644 --- a/src/pytorch_lightning/callbacks/model_checkpoint.py +++ b/src/lightning/pytorch/callbacks/model_checkpoint.py @@ -32,13 +32,13 @@ import torch import yaml from torch import Tensor -import pytorch_lightning as pl -from lightning_fabric.utilities.cloud_io import get_filesystem -from lightning_fabric.utilities.types import _PATH -from pytorch_lightning.callbacks import Checkpoint -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn, WarningCache -from pytorch_lightning.utilities.types import STEP_OUTPUT +import lightning.pytorch as pl +from lightning.fabric.utilities.cloud_io import get_filesystem +from lightning.fabric.utilities.types import _PATH +from lightning.pytorch.callbacks import Checkpoint +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_warn, WarningCache +from lightning.pytorch.utilities.types import STEP_OUTPUT log = logging.getLogger(__name__) warning_cache = WarningCache() @@ -47,7 +47,7 @@ warning_cache = WarningCache() class ModelCheckpoint(Checkpoint): r""" Save the model periodically by monitoring a quantity. Every metric logged with - :meth:`~pytorch_lightning.core.module.log` or :meth:`~pytorch_lightning.core.module.log_dict` in + :meth:`~lightning.pytorch.core.module.log` or :meth:`~lightning.pytorch.core.module.log_dict` in LightningModule is a candidate for the monitor key. For more information, see :ref:`checkpointing`. @@ -64,8 +64,8 @@ class ModelCheckpoint(Checkpoint): >>> checkpoint_callback = ModelCheckpoint(dirpath='my/path/') By default, dirpath is ``None`` and will be set at runtime to the location - specified by :class:`~pytorch_lightning.trainer.trainer.Trainer`'s - :paramref:`~pytorch_lightning.trainer.trainer.Trainer.default_root_dir` argument, + specified by :class:`~lightning.pytorch.trainer.trainer.Trainer`'s + :paramref:`~lightning.pytorch.trainer.trainer.Trainer.default_root_dir` argument, and if the Trainer uses a logger, the path will also contain logger name and version. filename: checkpoint filename. Can contain named formatting options to be auto-filled. 
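Pulling the ``ModelCheckpoint`` notes above together: any metric logged with ``self.log``/``self.log_dict`` can be monitored, ``dirpath`` defaults to the trainer's ``default_root_dir``, and ``filename`` may contain auto-filled formatting options. A sketch; the metric name, filename pattern, and argument values are illustrative::

    from lightning.pytorch import Trainer
    from lightning.pytorch.callbacks import ModelCheckpoint

    # 'val_loss' must be logged in the LightningModule, e.g. self.log("val_loss", loss),
    # for ModelCheckpoint to be able to monitor it.
    checkpoint_callback = ModelCheckpoint(
        dirpath="my/path/",
        filename="{epoch:02d}-{val_loss:.2f}",  # named formatting options are auto-filled
        monitor="val_loss",
        save_top_k=2,
    )
    trainer = Trainer(callbacks=[checkpoint_callback])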
@@ -157,8 +157,8 @@ class ModelCheckpoint(Checkpoint): Example:: - >>> from pytorch_lightning import Trainer - >>> from pytorch_lightning.callbacks import ModelCheckpoint + >>> from lightning.pytorch import Trainer + >>> from lightning.pytorch.callbacks import ModelCheckpoint # saves checkpoints to 'my/path/' at every epoch >>> checkpoint_callback = ModelCheckpoint(dirpath='my/path/') @@ -371,7 +371,7 @@ class ModelCheckpoint(Checkpoint): logger.after_save_checkpoint(proxy(self)) def _should_skip_saving_checkpoint(self, trainer: "pl.Trainer") -> bool: - from pytorch_lightning.trainer.states import TrainerFn + from lightning.pytorch.trainer.states import TrainerFn return ( bool(trainer.fast_dev_run) # disable checkpointing with fast_dev_run diff --git a/src/pytorch_lightning/callbacks/model_summary.py b/src/lightning/pytorch/callbacks/model_summary.py similarity index 78% rename from src/pytorch_lightning/callbacks/model_summary.py rename to src/lightning/pytorch/callbacks/model_summary.py index 5b7c1be91e..434ba92df1 100644 --- a/src/pytorch_lightning/callbacks/model_summary.py +++ b/src/lightning/pytorch/callbacks/model_summary.py @@ -15,7 +15,7 @@ Model Summary ============= -Generates a summary of all layers in a :class:`~pytorch_lightning.core.module.LightningModule`. +Generates a summary of all layers in a :class:`~lightning.pytorch.core.module.LightningModule`. The string representation of this summary prints a table with columns containing the name, type and number of parameters for each layer. @@ -24,19 +24,19 @@ the name, type and number of parameters for each layer. import logging from typing import List, Tuple, Union -import pytorch_lightning as pl -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.utilities.model_summary import DeepSpeedSummary -from pytorch_lightning.utilities.model_summary import ModelSummary as Summary -from pytorch_lightning.utilities.model_summary import summarize -from pytorch_lightning.utilities.model_summary.model_summary import _format_summary_table +import lightning.pytorch as pl +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.utilities.model_summary import DeepSpeedSummary +from lightning.pytorch.utilities.model_summary import ModelSummary as Summary +from lightning.pytorch.utilities.model_summary import summarize +from lightning.pytorch.utilities.model_summary.model_summary import _format_summary_table log = logging.getLogger(__name__) class ModelSummary(Callback): r""" - Generates a summary of all layers in a :class:`~pytorch_lightning.core.module.LightningModule`. + Generates a summary of all layers in a :class:`~lightning.pytorch.core.module.LightningModule`. Args: max_depth: The maximum depth of layer nesting that the summary will include. 
A value of 0 turns the @@ -44,8 +44,8 @@ class ModelSummary(Callback): Example:: - >>> from pytorch_lightning import Trainer - >>> from pytorch_lightning.callbacks import ModelSummary + >>> from lightning.pytorch import Trainer + >>> from lightning.pytorch.callbacks import ModelSummary >>> trainer = Trainer(callbacks=[ModelSummary(max_depth=1)]) """ @@ -66,7 +66,7 @@ class ModelSummary(Callback): self.summarize(summary_data, total_parameters, trainable_parameters, model_size) def _summary(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> Union[DeepSpeedSummary, Summary]: - from pytorch_lightning.strategies.deepspeed import DeepSpeedStrategy + from lightning.pytorch.strategies.deepspeed import DeepSpeedStrategy if isinstance(trainer.strategy, DeepSpeedStrategy) and trainer.strategy.zero_stage_3: return DeepSpeedSummary(pl_module, max_depth=self._max_depth) diff --git a/src/pytorch_lightning/callbacks/on_exception_checkpoint.py b/src/lightning/pytorch/callbacks/on_exception_checkpoint.py similarity index 88% rename from src/pytorch_lightning/callbacks/on_exception_checkpoint.py rename to src/lightning/pytorch/callbacks/on_exception_checkpoint.py index 8bbd24aab3..59c99fba0e 100644 --- a/src/pytorch_lightning/callbacks/on_exception_checkpoint.py +++ b/src/lightning/pytorch/callbacks/on_exception_checkpoint.py @@ -20,9 +20,9 @@ Automatically save a checkpoints on exception. import os from typing import Any -import pytorch_lightning as pl -from lightning_fabric.utilities.types import _PATH -from pytorch_lightning.callbacks import Checkpoint +import lightning.pytorch as pl +from lightning.fabric.utilities.types import _PATH +from lightning.pytorch.callbacks import Checkpoint class OnExceptionCheckpoint(Checkpoint): @@ -38,8 +38,8 @@ class OnExceptionCheckpoint(Checkpoint): Example: - >>> from pytorch_lightning import Trainer - >>> from pytorch_lightning.callbacks import OnExceptionCheckpoint + >>> from lightning.pytorch import Trainer + >>> from lightning.pytorch.callbacks import OnExceptionCheckpoint >>> trainer = Trainer(callbacks=[OnExceptionCheckpoint(".")]) """ diff --git a/src/pytorch_lightning/callbacks/prediction_writer.py b/src/lightning/pytorch/callbacks/prediction_writer.py similarity index 94% rename from src/pytorch_lightning/callbacks/prediction_writer.py rename to src/lightning/pytorch/callbacks/prediction_writer.py index 129d3a6804..7f8286d29b 100644 --- a/src/pytorch_lightning/callbacks/prediction_writer.py +++ b/src/lightning/pytorch/callbacks/prediction_writer.py @@ -19,10 +19,10 @@ Aids in saving predictions """ from typing import Any, Literal, Optional, Sequence -import pytorch_lightning as pl -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.utilities import LightningEnum -from pytorch_lightning.utilities.exceptions import MisconfigurationException +import lightning.pytorch as pl +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.utilities import LightningEnum +from lightning.pytorch.utilities.exceptions import MisconfigurationException class WriteInterval(LightningEnum): @@ -48,7 +48,7 @@ class BasePredictionWriter(Callback): Example:: import torch - from pytorch_lightning.callbacks import BasePredictionWriter + from lightning.pytorch.callbacks import BasePredictionWriter class CustomWriter(BasePredictionWriter): @@ -75,7 +75,7 @@ class BasePredictionWriter(Callback): # multi-device inference example import torch - from pytorch_lightning.callbacks import BasePredictionWriter + from 
lightning.pytorch.callbacks import BasePredictionWriter class CustomWriter(BasePredictionWriter): diff --git a/src/pytorch_lightning/callbacks/progress/__init__.py b/src/lightning/pytorch/callbacks/progress/__init__.py similarity index 79% rename from src/pytorch_lightning/callbacks/progress/__init__.py rename to src/lightning/pytorch/callbacks/progress/__init__.py index a0f3fa2714..de770d5c85 100644 --- a/src/pytorch_lightning/callbacks/progress/__init__.py +++ b/src/lightning/pytorch/callbacks/progress/__init__.py @@ -18,6 +18,6 @@ Progress Bars Use or override one of the progress bar callbacks. """ -from pytorch_lightning.callbacks.progress.base import ProgressBarBase # noqa: F401 -from pytorch_lightning.callbacks.progress.rich_progress import RichProgressBar # noqa: F401 -from pytorch_lightning.callbacks.progress.tqdm_progress import TQDMProgressBar # noqa: F401 +from lightning.pytorch.callbacks.progress.base import ProgressBarBase # noqa: F401 +from lightning.pytorch.callbacks.progress.rich_progress import RichProgressBar # noqa: F401 +from lightning.pytorch.callbacks.progress.tqdm_progress import TQDMProgressBar # noqa: F401 diff --git a/src/pytorch_lightning/callbacks/progress/base.py b/src/lightning/pytorch/callbacks/progress/base.py similarity index 96% rename from src/pytorch_lightning/callbacks/progress/base.py rename to src/lightning/pytorch/callbacks/progress/base.py index 5681019d7a..1edc2e940d 100644 --- a/src/pytorch_lightning/callbacks/progress/base.py +++ b/src/lightning/pytorch/callbacks/progress/base.py @@ -13,16 +13,16 @@ # limitations under the License. from typing import Any, Dict, Optional, Union -import pytorch_lightning as pl -from pytorch_lightning.callbacks import Callback -from pytorch_lightning.utilities.logger import _version -from pytorch_lightning.utilities.rank_zero import rank_zero_warn +import lightning.pytorch as pl +from lightning.pytorch.callbacks import Callback +from lightning.pytorch.utilities.logger import _version +from lightning.pytorch.utilities.rank_zero import rank_zero_warn class ProgressBarBase(Callback): r""" - The base class for progress bars in Lightning. It is a :class:`~pytorch_lightning.callbacks.Callback` - that keeps track of the batch progress in the :class:`~pytorch_lightning.trainer.trainer.Trainer`. + The base class for progress bars in Lightning. It is a :class:`~lightning.pytorch.callbacks.Callback` + that keeps track of the batch progress in the :class:`~lightning.pytorch.trainer.trainer.Trainer`. You should implement your highly custom progress bars with this as the base class. Example:: @@ -207,7 +207,7 @@ class ProgressBarBase(Callback): def enable(self) -> None: """You should provide a way to enable the progress bar. - The :class:`~pytorch_lightning.trainer.trainer.Trainer` will call this in e.g. pre-training + The :class:`~lightning.pytorch.trainer.trainer.Trainer` will call this in e.g. pre-training routines like the :ref:`learning rate finder `. to temporarily enable and disable the main progress bar. 
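As the ``ProgressBarBase`` docstring above suggests, custom progress bars subclass it and must provide a way to enable and disable themselves. A minimal sketch, assuming a plain stdout bar; the printing logic is an illustration, not taken from this patch::

    import sys

    from lightning.pytorch.callbacks import ProgressBarBase


    class LitProgressBar(ProgressBarBase):
        def __init__(self) -> None:
            super().__init__()
            self._enabled = True

        def disable(self) -> None:
            self._enabled = False

        def enable(self) -> None:
            self._enabled = True

        def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx) -> None:
            super().on_train_batch_end(trainer, pl_module, outputs, batch, batch_idx)
            if self._enabled:
                percent = 100 * self.train_batch_idx / max(1, self.total_train_batches)
                sys.stdout.write(f"\r{percent:.1f}% complete")
                sys.stdout.flush()

Passing an instance via ``Trainer(callbacks=[LitProgressBar()])`` should replace the default bar.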
""" diff --git a/src/pytorch_lightning/callbacks/progress/rich_progress.py b/src/lightning/pytorch/callbacks/progress/rich_progress.py similarity index 98% rename from src/pytorch_lightning/callbacks/progress/rich_progress.py rename to src/lightning/pytorch/callbacks/progress/rich_progress.py index 41c53bea31..eb87e964de 100644 --- a/src/pytorch_lightning/callbacks/progress/rich_progress.py +++ b/src/lightning/pytorch/callbacks/progress/rich_progress.py @@ -18,9 +18,9 @@ from typing import Any, cast, Dict, Optional, Union from lightning_utilities.core.imports import RequirementCache -import pytorch_lightning as pl -from pytorch_lightning.callbacks.progress.base import ProgressBarBase -from pytorch_lightning.utilities.types import STEP_OUTPUT +import lightning.pytorch as pl +from lightning.pytorch.callbacks.progress.base import ProgressBarBase +from lightning.pytorch.utilities.types import STEP_OUTPUT _RICH_AVAILABLE: bool = RequirementCache("rich>=10.2.2") @@ -215,8 +215,8 @@ class RichProgressBar(ProgressBarBase): .. code-block:: python - from pytorch_lightning import Trainer - from pytorch_lightning.callbacks import RichProgressBar + from lightning.pytorch import Trainer + from lightning.pytorch.callbacks import RichProgressBar trainer = Trainer(callbacks=RichProgressBar()) diff --git a/src/pytorch_lightning/callbacks/progress/tqdm_progress.py b/src/lightning/pytorch/callbacks/progress/tqdm_progress.py similarity index 97% rename from src/pytorch_lightning/callbacks/progress/tqdm_progress.py rename to src/lightning/pytorch/callbacks/progress/tqdm_progress.py index a64451bbab..a1616c9815 100644 --- a/src/pytorch_lightning/callbacks/progress/tqdm_progress.py +++ b/src/lightning/pytorch/callbacks/progress/tqdm_progress.py @@ -25,9 +25,9 @@ if importlib.util.find_spec("ipywidgets") is not None: else: from tqdm import tqdm as _tqdm -import pytorch_lightning as pl -from pytorch_lightning.callbacks.progress.base import ProgressBarBase -from pytorch_lightning.utilities.rank_zero import rank_zero_debug +import lightning.pytorch as pl +from lightning.pytorch.callbacks.progress.base import ProgressBarBase +from lightning.pytorch.utilities.rank_zero import rank_zero_debug _PAD_SIZE = 5 @@ -65,7 +65,7 @@ class TQDMProgressBar(ProgressBarBase): - **sanity check progress:** the progress during the sanity check run - **main progress:** shows training + validation progress combined. It also accounts for multiple validation runs during training when - :paramref:`~pytorch_lightning.trainer.trainer.Trainer.val_check_interval` is used. + :paramref:`~lightning.pytorch.trainer.trainer.Trainer.val_check_interval` is used. - **validation progress:** only visible during validation; shows total progress over all validation datasets. - **test progress:** only active when testing; shows total progress over all test datasets. @@ -74,7 +74,7 @@ class TQDMProgressBar(ProgressBarBase): If you want to customize the default ``tqdm`` progress bars used by Lightning, you can override specific methods of the callback class and pass your custom implementation to the - :class:`~pytorch_lightning.trainer.trainer.Trainer`. + :class:`~lightning.pytorch.trainer.trainer.Trainer`. Example: @@ -85,7 +85,7 @@ class TQDMProgressBar(ProgressBarBase): ... return bar ... 
>>> bar = LitProgressBar() - >>> from pytorch_lightning import Trainer + >>> from lightning.pytorch import Trainer >>> trainer = Trainer(callbacks=[bar]) Args: @@ -94,8 +94,8 @@ class TQDMProgressBar(ProgressBarBase): process_position: Set this to a value greater than ``0`` to offset the progress bars by this many lines. This is useful when you have progress bars defined elsewhere and want to show all of them together. This corresponds to - :paramref:`~pytorch_lightning.trainer.trainer.Trainer.process_position` in the - :class:`~pytorch_lightning.trainer.trainer.Trainer`. + :paramref:`~lightning.pytorch.trainer.trainer.Trainer.process_position` in the + :class:`~lightning.pytorch.trainer.trainer.Trainer`. """ def __init__(self, refresh_rate: int = 1, process_position: int = 0): diff --git a/src/pytorch_lightning/callbacks/pruning.py b/src/lightning/pytorch/callbacks/pruning.py similarity index 98% rename from src/pytorch_lightning/callbacks/pruning.py rename to src/lightning/pytorch/callbacks/pruning.py index 41ced73b01..89cb756b2d 100644 --- a/src/pytorch_lightning/callbacks/pruning.py +++ b/src/lightning/pytorch/callbacks/pruning.py @@ -26,11 +26,11 @@ from lightning_utilities.core.apply_func import apply_to_collection from torch import nn, Tensor from typing_extensions import TypedDict -import pytorch_lightning as pl -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.core.module import LightningModule -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_zero_debug, rank_zero_only +import lightning.pytorch as pl +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.core.module import LightningModule +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_only log = logging.getLogger(__name__) diff --git a/src/pytorch_lightning/callbacks/quantization.py b/src/lightning/pytorch/callbacks/quantization.py similarity index 98% rename from src/pytorch_lightning/callbacks/quantization.py rename to src/lightning/pytorch/callbacks/quantization.py index 5fe79cdb76..5be2ca5690 100644 --- a/src/pytorch_lightning/callbacks/quantization.py +++ b/src/lightning/pytorch/callbacks/quantization.py @@ -25,10 +25,10 @@ from torch import Tensor from torch.ao.quantization.qconfig import QConfig from torch.quantization import FakeQuantizeBase -import pytorch_lightning as pl -from lightning_fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_11, _TORCH_GREATER_EQUAL_1_12 -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.utilities.exceptions import MisconfigurationException +import lightning.pytorch as pl +from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_11, _TORCH_GREATER_EQUAL_1_12 +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.utilities.exceptions import MisconfigurationException if _TORCH_GREATER_EQUAL_1_11: from torch.ao.quantization import fuse_modules_qat as fuse_modules diff --git a/src/pytorch_lightning/callbacks/rich_model_summary.py b/src/lightning/pytorch/callbacks/rich_model_summary.py similarity index 85% rename from src/pytorch_lightning/callbacks/rich_model_summary.py rename to src/lightning/pytorch/callbacks/rich_model_summary.py index 7373d91263..513adc34d5 100644 --- a/src/pytorch_lightning/callbacks/rich_model_summary.py +++ 
b/src/lightning/pytorch/callbacks/rich_model_summary.py @@ -13,9 +13,9 @@ # limitations under the License. from typing import List, Tuple -from pytorch_lightning.callbacks import ModelSummary -from pytorch_lightning.utilities.imports import _RICH_AVAILABLE -from pytorch_lightning.utilities.model_summary import get_human_readable_count +from lightning.pytorch.callbacks import ModelSummary +from lightning.pytorch.utilities.imports import _RICH_AVAILABLE +from lightning.pytorch.utilities.model_summary import get_human_readable_count if _RICH_AVAILABLE: from rich import get_console @@ -24,7 +24,7 @@ if _RICH_AVAILABLE: class RichModelSummary(ModelSummary): r""" - Generates a summary of all layers in a :class:`~pytorch_lightning.core.module.LightningModule` + Generates a summary of all layers in a :class:`~lightning.pytorch.core.module.LightningModule` with `rich text formatting `_. Install it with pip: @@ -35,17 +35,17 @@ class RichModelSummary(ModelSummary): .. code-block:: python - from pytorch_lightning import Trainer - from pytorch_lightning.callbacks import RichModelSummary + from lightning.pytorch import Trainer + from lightning.pytorch.callbacks import RichModelSummary trainer = Trainer(callbacks=RichModelSummary()) - You could also enable ``RichModelSummary`` using the :class:`~pytorch_lightning.callbacks.RichProgressBar` + You could also enable ``RichModelSummary`` using the :class:`~lightning.pytorch.callbacks.RichProgressBar` .. code-block:: python - from pytorch_lightning import Trainer - from pytorch_lightning.callbacks import RichProgressBar + from lightning.pytorch import Trainer + from lightning.pytorch.callbacks import RichProgressBar trainer = Trainer(callbacks=RichProgressBar()) diff --git a/src/pytorch_lightning/callbacks/stochastic_weight_avg.py b/src/lightning/pytorch/callbacks/stochastic_weight_avg.py similarity index 97% rename from src/pytorch_lightning/callbacks/stochastic_weight_avg.py rename to src/lightning/pytorch/callbacks/stochastic_weight_avg.py index 945732a45c..8065d0ebd0 100644 --- a/src/pytorch_lightning/callbacks/stochastic_weight_avg.py +++ b/src/lightning/pytorch/callbacks/stochastic_weight_avg.py @@ -22,14 +22,14 @@ import torch from torch import nn, Tensor from torch.optim.swa_utils import SWALR -import pytorch_lightning as pl -from lightning_fabric.utilities.types import LRScheduler -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.strategies import DeepSpeedStrategy -from pytorch_lightning.strategies.fsdp import FSDPStrategy -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn -from pytorch_lightning.utilities.types import LRSchedulerConfig +import lightning.pytorch as pl +from lightning.fabric.utilities.types import LRScheduler +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.strategies import DeepSpeedStrategy +from lightning.pytorch.strategies.fsdp import FSDPStrategy +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_warn +from lightning.pytorch.utilities.types import LRSchedulerConfig _AVG_FN = Callable[[Tensor, Tensor, Tensor], Tensor] diff --git a/src/pytorch_lightning/callbacks/timer.py b/src/lightning/pytorch/callbacks/timer.py similarity index 94% rename from src/pytorch_lightning/callbacks/timer.py rename to src/lightning/pytorch/callbacks/timer.py index 
6e2b60223a..5cc27f3c3a 100644 --- a/src/pytorch_lightning/callbacks/timer.py +++ b/src/lightning/pytorch/callbacks/timer.py @@ -20,12 +20,12 @@ import time from datetime import timedelta from typing import Any, Dict, Optional, Union -import pytorch_lightning as pl -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.trainer.states import RunningStage -from pytorch_lightning.utilities import LightningEnum -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_zero_info +import lightning.pytorch as pl +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.trainer.states import RunningStage +from lightning.pytorch.utilities import LightningEnum +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.rank_zero import rank_zero_info log = logging.getLogger(__name__) @@ -52,8 +52,8 @@ class Timer(Callback): Example:: - from pytorch_lightning import Trainer - from pytorch_lightning.callbacks import Timer + from lightning.pytorch import Trainer + from lightning.pytorch.callbacks import Timer # stop training after 12 hours timer = Timer(duration="00:12:00:00") diff --git a/src/pytorch_lightning/cli.py b/src/lightning/pytorch/cli.py similarity index 96% rename from src/pytorch_lightning/cli.py rename to src/lightning/pytorch/cli.py index d3f9f2a3e0..7e27cd8b75 100644 --- a/src/pytorch_lightning/cli.py +++ b/src/lightning/pytorch/cli.py @@ -22,13 +22,13 @@ from lightning_utilities.core.imports import RequirementCache from lightning_utilities.core.rank_zero import _warn from torch.optim import Optimizer -import pytorch_lightning as pl -from lightning_fabric.utilities.cloud_io import get_filesystem -from lightning_fabric.utilities.types import _TORCH_LRSCHEDULER -from pytorch_lightning import Callback, LightningDataModule, LightningModule, seed_everything, Trainer -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_warn +import lightning.pytorch as pl +from lightning.fabric.utilities.cloud_io import get_filesystem +from lightning.fabric.utilities.types import _TORCH_LRSCHEDULER +from lightning.pytorch import Callback, LightningDataModule, LightningModule, seed_everything, Trainer +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import rank_zero_warn _JSONARGPARSE_SIGNATURES_AVAILABLE = RequirementCache("jsonargparse[signatures]>=4.17.0") @@ -292,21 +292,21 @@ class LightningCLI: .. warning:: ``LightningCLI`` is in beta and subject to change. Args: - model_class: An optional :class:`~pytorch_lightning.core.module.LightningModule` class to train on or a - callable which returns a :class:`~pytorch_lightning.core.module.LightningModule` instance when + model_class: An optional :class:`~lightning.pytorch.core.module.LightningModule` class to train on or a + callable which returns a :class:`~lightning.pytorch.core.module.LightningModule` instance when called. If ``None``, you can pass a registered model with ``--model=MyModel``. 
- datamodule_class: An optional :class:`~pytorch_lightning.core.datamodule.LightningDataModule` class or a - callable which returns a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` instance when + datamodule_class: An optional :class:`~lightning.pytorch.core.datamodule.LightningDataModule` class or a + callable which returns a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` instance when called. If ``None``, you can pass a registered datamodule with ``--data=MyDataModule``. save_config_callback: A callback class to save the config. save_config_kwargs: Parameters that will be used to instantiate the save_config_callback. - trainer_class: An optional subclass of the :class:`~pytorch_lightning.trainer.trainer.Trainer` class or a - callable which returns a :class:`~pytorch_lightning.trainer.trainer.Trainer` instance when called. + trainer_class: An optional subclass of the :class:`~lightning.pytorch.trainer.trainer.Trainer` class or a + callable which returns a :class:`~lightning.pytorch.trainer.trainer.Trainer` instance when called. trainer_defaults: Set to override Trainer defaults or add persistent callbacks. The callbacks added through this argument will not be configurable from a configuration file and will always be present for this particular CLI. Alternatively, configurable callbacks can be added as explained in :ref:`the CLI docs `. - seed_everything_default: Number for the :func:`~lightning_fabric.utilities.seed.seed_everything` + seed_everything_default: Number for the :func:`~lightning.fabric.utilities.seed.seed_everything` seed value. Set to True to automatically choose a seed value. Setting it to False will avoid calling ``seed_everything``. parser_kwargs: Additional arguments to instantiate each ``LightningArgumentParser``. @@ -319,7 +319,7 @@ class LightningCLI: args: Arguments to parse. If ``None`` the arguments are taken from ``sys.argv``. Command line style arguments can be given in a ``list``. Alternatively, structured config options can be given in a ``dict`` or ``jsonargparse.Namespace``. - run: Whether subcommands should be added to run a :class:`~pytorch_lightning.trainer.trainer.Trainer` + run: Whether subcommands should be added to run a :class:`~lightning.pytorch.trainer.trainer.Trainer` method. If set to ``False``, the trainer and model classes will be instantiated only. """ self.save_config_callback = save_config_callback @@ -362,7 +362,7 @@ class LightningCLI: def init_parser(self, **kwargs: Any) -> LightningArgumentParser: """Method that instantiates the argument parser.""" - kwargs.setdefault("dump_header", [f"pytorch_lightning=={pl.__version__}"]) + kwargs.setdefault("dump_header", [f"lightning.pytorch=={pl.__version__}"]) parser = LightningArgumentParser(**kwargs) parser.add_argument( "-c", "--config", action=ActionConfigFile, help="Path to a configuration file in json or yaml format." @@ -549,7 +549,7 @@ class LightningCLI: def configure_optimizers( lightning_module: LightningModule, optimizer: Optimizer, lr_scheduler: Optional[LRSchedulerTypeUnion] = None ) -> Any: - """Override to customize the :meth:`~pytorch_lightning.core.module.LightningModule.configure_optimizers` + """Override to customize the :meth:`~lightning.pytorch.core.module.LightningModule.configure_optimizers` method. 
Args: @@ -567,7 +567,7 @@ class LightningCLI: return [optimizer], [lr_scheduler] def _add_configure_optimizers_method_to_model(self, subcommand: Optional[str]) -> None: - """Overrides the model's :meth:`~pytorch_lightning.core.module.LightningModule.configure_optimizers` method + """Overrides the model's :meth:`~lightning.pytorch.core.module.LightningModule.configure_optimizers` method if a single optimizer and optionally a scheduler argument groups are added to the parser as 'AUTOMATIC'.""" if not self.auto_configure_optimizers: return diff --git a/src/pytorch_lightning/core/__init__.py b/src/lightning/pytorch/core/__init__.py similarity index 84% rename from src/pytorch_lightning/core/__init__.py rename to src/lightning/pytorch/core/__init__.py index 4cfc736ee9..996219bee8 100644 --- a/src/pytorch_lightning/core/__init__.py +++ b/src/lightning/pytorch/core/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from pytorch_lightning.core.datamodule import LightningDataModule -from pytorch_lightning.core.module import LightningModule +from lightning.pytorch.core.datamodule import LightningDataModule +from lightning.pytorch.core.module import LightningModule __all__ = ["LightningDataModule", "LightningModule"] diff --git a/src/pytorch_lightning/core/datamodule.py b/src/lightning/pytorch/core/datamodule.py similarity index 96% rename from src/pytorch_lightning/core/datamodule.py rename to src/lightning/pytorch/core/datamodule.py index 30f45fb475..d859715289 100644 --- a/src/pytorch_lightning/core/datamodule.py +++ b/src/lightning/pytorch/core/datamodule.py @@ -19,18 +19,18 @@ from typing import Any, Dict, IO, List, Mapping, Optional, Sequence, Tuple, Unio from torch.utils.data import DataLoader, Dataset, IterableDataset from typing_extensions import Self -import pytorch_lightning as pl -from lightning_fabric.utilities.types import _PATH -from pytorch_lightning.core.hooks import DataHooks -from pytorch_lightning.core.mixins import HyperparametersMixin -from pytorch_lightning.core.saving import _load_from_checkpoint -from pytorch_lightning.utilities.argparse import ( +import lightning.pytorch as pl +from lightning.fabric.utilities.types import _PATH +from lightning.pytorch.core.hooks import DataHooks +from lightning.pytorch.core.mixins import HyperparametersMixin +from lightning.pytorch.core.saving import _load_from_checkpoint +from lightning.pytorch.utilities.argparse import ( add_argparse_args, from_argparse_args, get_init_arguments_and_types, parse_argparser, ) -from pytorch_lightning.utilities.types import _ADD_ARGPARSE_RETURN, EVAL_DATALOADERS, TRAIN_DATALOADERS +from lightning.pytorch.utilities.types import _ADD_ARGPARSE_RETURN, EVAL_DATALOADERS, TRAIN_DATALOADERS class LightningDataModule(DataHooks, HyperparametersMixin): @@ -91,7 +91,7 @@ class LightningDataModule(DataHooks, HyperparametersMixin): Args: args: The parser or namespace to take arguments from. Only known arguments will be - parsed and passed to the :class:`~pytorch_lightning.core.datamodule.LightningDataModule`. + parsed and passed to the :class:`~lightning.pytorch.core.datamodule.LightningDataModule`. **kwargs: Additional keyword arguments that may override ones in the parser or namespace. These must be valid DataModule arguments. 
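For the ``LightningCLI`` arguments described above, a minimal entry point under the new namespace could look as follows; ``MyModel`` and ``MyDataModule`` are placeholders for your own classes, and with ``run`` left at its default the subcommand (``fit``, ``validate``, ...) is taken from the command line::

    # cli_main.py (hypothetical file name)
    from lightning.pytorch.cli import LightningCLI

    from my_project import MyDataModule, MyModel  # placeholders for your own classes


    def cli_main() -> None:
        # The subcommand selects which Trainer method to run, e.g. `python cli_main.py fit`.
        LightningCLI(MyModel, MyDataModule)


    if __name__ == "__main__":
        cli_main()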
diff --git a/src/pytorch_lightning/core/hooks.py b/src/lightning/pytorch/core/hooks.py similarity index 97% rename from src/pytorch_lightning/core/hooks.py rename to src/lightning/pytorch/core/hooks.py index acf59ed941..4823d00e34 100644 --- a/src/pytorch_lightning/core/hooks.py +++ b/src/lightning/pytorch/core/hooks.py @@ -19,9 +19,9 @@ import torch from torch import Tensor from torch.optim.optimizer import Optimizer -from pytorch_lightning.utilities import move_data_to_device -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.types import EVAL_DATALOADERS, STEP_OUTPUT, TRAIN_DATALOADERS +from lightning.pytorch.utilities import move_data_to_device +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.types import EVAL_DATALOADERS, STEP_OUTPUT, TRAIN_DATALOADERS class ModelHooks: @@ -233,7 +233,7 @@ class ModelHooks: """Called before ``optimizer.step()``. If using gradient accumulation, the hook is called once the gradients have been accumulated. - See: :paramref:`~pytorch_lightning.trainer.Trainer.accumulate_grad_batches`. + See: :paramref:`~lightning.pytorch.trainer.Trainer.accumulate_grad_batches`. If using AMP, the loss will be unscaled before calling this hook. See these `docs `__ @@ -378,7 +378,7 @@ class DataHooks: In the case of multiple dataloaders, please see this :ref:`section `. The dataloader you return will not be reloaded unless you set - :paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_n_epochs` to + :paramref:`~lightning.pytorch.trainer.Trainer.reload_dataloaders_every_n_epochs` to a positive integer. For data processing use the following pattern: @@ -390,7 +390,7 @@ class DataHooks: .. warning:: do not assign state in prepare_data - - :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit` + - :meth:`~lightning.pytorch.trainer.trainer.Trainer.fit` - :meth:`prepare_data` - :meth:`setup` @@ -455,7 +455,7 @@ class DataHooks: .. warning:: do not assign state in prepare_data - - :meth:`~pytorch_lightning.trainer.trainer.Trainer.test` + - :meth:`~lightning.pytorch.trainer.trainer.Trainer.test` - :meth:`prepare_data` - :meth:`setup` @@ -500,13 +500,13 @@ class DataHooks: Implement one or multiple PyTorch DataLoaders for validation. The dataloader you return will not be reloaded unless you set - :paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_n_epochs` to + :paramref:`~lightning.pytorch.trainer.Trainer.reload_dataloaders_every_n_epochs` to a positive integer. It's recommended that all data downloads and preparation happen in :meth:`prepare_data`. - - :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit` - - :meth:`~pytorch_lightning.trainer.trainer.Trainer.validate` + - :meth:`~lightning.pytorch.trainer.trainer.Trainer.fit` + - :meth:`~lightning.pytorch.trainer.trainer.Trainer.validate` - :meth:`prepare_data` - :meth:`setup` @@ -552,7 +552,7 @@ class DataHooks: It's recommended that all data downloads and preparation happen in :meth:`prepare_data`. 
- - :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict` + - :meth:`~lightning.pytorch.trainer.trainer.Trainer.predict` - :meth:`prepare_data` - :meth:`setup` diff --git a/src/pytorch_lightning/core/mixins/__init__.py b/src/lightning/pytorch/core/mixins/__init__.py similarity index 90% rename from src/pytorch_lightning/core/mixins/__init__.py rename to src/lightning/pytorch/core/mixins/__init__.py index c91983aa61..f412a692e7 100644 --- a/src/pytorch_lightning/core/mixins/__init__.py +++ b/src/lightning/pytorch/core/mixins/__init__.py @@ -11,4 +11,4 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from pytorch_lightning.core.mixins.hparams_mixin import HyperparametersMixin # noqa: F401 +from lightning.pytorch.core.mixins.hparams_mixin import HyperparametersMixin # noqa: F401 diff --git a/src/pytorch_lightning/core/mixins/hparams_mixin.py b/src/lightning/pytorch/core/mixins/hparams_mixin.py similarity index 93% rename from src/pytorch_lightning/core/mixins/hparams_mixin.py rename to src/lightning/pytorch/core/mixins/hparams_mixin.py index 56ef099a78..9f9c1dcab3 100644 --- a/src/pytorch_lightning/core/mixins/hparams_mixin.py +++ b/src/lightning/pytorch/core/mixins/hparams_mixin.py @@ -17,8 +17,8 @@ import types from argparse import Namespace from typing import Any, List, MutableMapping, Optional, Sequence, Union -from pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, PRIMITIVE_TYPES -from pytorch_lightning.utilities.parsing import AttributeDict, save_hyperparameters +from lightning.pytorch.core.saving import ALLOWED_CONFIG_TYPES, PRIMITIVE_TYPES +from lightning.pytorch.utilities.parsing import AttributeDict, save_hyperparameters class HyperparametersMixin: @@ -47,7 +47,7 @@ class HyperparametersMixin: logger: Whether to send the hyperparameters to the logger. Default: True Example:: - >>> from pytorch_lightning.core.mixins import HyperparametersMixin + >>> from lightning.pytorch.core.mixins import HyperparametersMixin >>> class ManuallyArgsModel(HyperparametersMixin): ... def __init__(self, arg1, arg2, arg3): ... super().__init__() @@ -60,7 +60,7 @@ class HyperparametersMixin: "arg1": 1 "arg3": 3.14 - >>> from pytorch_lightning.core.mixins import HyperparametersMixin + >>> from lightning.pytorch.core.mixins import HyperparametersMixin >>> class AutomaticArgsModel(HyperparametersMixin): ... def __init__(self, arg1, arg2, arg3): ... super().__init__() @@ -74,7 +74,7 @@ class HyperparametersMixin: "arg2": abc "arg3": 3.14 - >>> from pytorch_lightning.core.mixins import HyperparametersMixin + >>> from lightning.pytorch.core.mixins import HyperparametersMixin >>> class SingleArgModel(HyperparametersMixin): ... def __init__(self, params): ... super().__init__() @@ -88,7 +88,7 @@ class HyperparametersMixin: "p2": abc "p3": 3.14 - >>> from pytorch_lightning.core.mixins import HyperparametersMixin + >>> from lightning.pytorch.core.mixins import HyperparametersMixin >>> class ManuallyArgsModel(HyperparametersMixin): ... def __init__(self, arg1, arg2, arg3): ... 
super().__init__() diff --git a/src/pytorch_lightning/core/module.py b/src/lightning/pytorch/core/module.py similarity index 97% rename from src/pytorch_lightning/core/module.py rename to src/lightning/pytorch/core/module.py index 549166c2f2..a0b8871a3b 100644 --- a/src/pytorch_lightning/core/module.py +++ b/src/lightning/pytorch/core/module.py @@ -27,28 +27,28 @@ from torch.nn import Module from torch.optim.optimizer import Optimizer from torchmetrics import Metric, MetricCollection -import lightning_fabric as lf -import pytorch_lightning as pl -from lightning_fabric.loggers import Logger as FabricLogger -from lightning_fabric.utilities.apply_func import convert_to_tensors -from lightning_fabric.utilities.cloud_io import get_filesystem -from lightning_fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin -from lightning_fabric.utilities.distributed import _distributed_available, _sync_ddp -from lightning_fabric.utilities.imports import _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_11, _TORCH_GREATER_EQUAL_2_0 -from lightning_fabric.wrappers import _FabricOptimizer -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.core.hooks import CheckpointHooks, DataHooks, ModelHooks -from pytorch_lightning.core.mixins import HyperparametersMixin -from pytorch_lightning.core.optimizer import LightningOptimizer -from pytorch_lightning.core.saving import ModelIO -from pytorch_lightning.loggers import Logger -from pytorch_lightning.trainer.connectors.logger_connector.fx_validator import _FxValidator -from pytorch_lightning.utilities import GradClipAlgorithmType -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_13, _TORCHMETRICS_GREATER_EQUAL_0_9_1 -from pytorch_lightning.utilities.rank_zero import rank_zero_debug, rank_zero_warn, WarningCache -from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature -from pytorch_lightning.utilities.types import ( +import lightning.fabric as lf +import lightning.pytorch as pl +from lightning.fabric.loggers import Logger as FabricLogger +from lightning.fabric.utilities.apply_func import convert_to_tensors +from lightning.fabric.utilities.cloud_io import get_filesystem +from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin +from lightning.fabric.utilities.distributed import _distributed_available, _sync_ddp +from lightning.fabric.utilities.imports import _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_11, _TORCH_GREATER_EQUAL_2_0 +from lightning.fabric.wrappers import _FabricOptimizer +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.core.hooks import CheckpointHooks, DataHooks, ModelHooks +from lightning.pytorch.core.mixins import HyperparametersMixin +from lightning.pytorch.core.optimizer import LightningOptimizer +from lightning.pytorch.core.saving import ModelIO +from lightning.pytorch.loggers import Logger +from lightning.pytorch.trainer.connectors.logger_connector.fx_validator import _FxValidator +from lightning.pytorch.utilities import GradClipAlgorithmType +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.imports import _TORCH_GREATER_EQUAL_1_13, _TORCHMETRICS_GREATER_EQUAL_0_9_1 +from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_warn, WarningCache +from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature +from lightning.pytorch.utilities.types 
import ( _METRIC, EPOCH_OUTPUT, LRSchedulerPLType, @@ -135,7 +135,7 @@ class LightningModule( Args: use_pl_optimizer: If ``True``, will wrap the optimizer(s) in a - :class:`~pytorch_lightning.core.optimizer.LightningOptimizer` for automatic handling of precision and + :class:`~lightning.pytorch.core.optimizer.LightningOptimizer` for automatic handling of precision and profiling. Returns: @@ -1168,16 +1168,16 @@ class LightningModule( """ def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any: - """Step function called during :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`. By default, it - calls :meth:`~pytorch_lightning.core.module.LightningModule.forward`. Override to add any processing logic. + """Step function called during :meth:`~lightning.pytorch.trainer.trainer.Trainer.predict`. By default, it + calls :meth:`~lightning.pytorch.core.module.LightningModule.forward`. Override to add any processing logic. - The :meth:`~pytorch_lightning.core.module.LightningModule.predict_step` is used + The :meth:`~lightning.pytorch.core.module.LightningModule.predict_step` is used to scale inference on multi-devices. - To prevent an OOM error, it is possible to use :class:`~pytorch_lightning.callbacks.BasePredictionWriter` + To prevent an OOM error, it is possible to use :class:`~lightning.pytorch.callbacks.BasePredictionWriter` callback to write the predictions to disk or database after each batch or on epoch end. - The :class:`~pytorch_lightning.callbacks.BasePredictionWriter` should be used while using a spawn + The :class:`~lightning.pytorch.callbacks.BasePredictionWriter` should be used while using a spawn based accelerator. This happens for ``Trainer(strategy="ddp_spawn")`` or training on 8 TPU cores with ``Trainer(accelerator="tpu", devices=8)`` as predictions won't be returned. @@ -1209,7 +1209,7 @@ class LightningModule( gets called, the list or a callback returned here will be merged with the list of callbacks passed to the Trainer's ``callbacks`` argument. If a callback returned here has the same type as one or several callbacks already present in the Trainer's callbacks list, it will take priority and replace them. In addition, - Lightning will make sure :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callbacks + Lightning will make sure :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` callbacks run last. Return: @@ -1309,7 +1309,7 @@ class LightningModule( ) Metrics can be made available to monitor by simply logging it using - ``self.log('metric_to_track', metric_val)`` in your :class:`~pytorch_lightning.core.module.LightningModule`. + ``self.log('metric_to_track', metric_val)`` in your :class:`~lightning.pytorch.core.module.LightningModule`. Note: Some things to know: @@ -1504,7 +1504,7 @@ class LightningModule( def lr_scheduler_step(self, scheduler: LRSchedulerTypeUnion, metric: Optional[Any]) -> None: r""" Override this method to adjust the default way the - :class:`~pytorch_lightning.trainer.trainer.Trainer` calls each scheduler. + :class:`~lightning.pytorch.trainer.trainer.Trainer` calls each scheduler. By default, Lightning calls ``step()`` and as shown in the example for each scheduler based on its ``interval``. 
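The ``lr_scheduler_step`` hook above can be overridden to change how the Trainer steps each scheduler; in this patch it receives the scheduler and an optional metric. A sketch that mirrors the default behaviour, where the metric branch is the usual pattern for schedulers such as ``ReduceLROnPlateau``::

    from lightning.pytorch import LightningModule


    class MyModel(LightningModule):  # placeholder module
        def lr_scheduler_step(self, scheduler, metric):
            if metric is None:
                scheduler.step()
            else:
                # Schedulers that monitor a value (e.g. ReduceLROnPlateau) receive it here.
                scheduler.step(metric)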
@@ -1539,7 +1539,7 @@ class LightningModule( optimizer_closure: Optional[Callable[[], Any]] = None, ) -> None: r""" - Override this method to adjust the default way the :class:`~pytorch_lightning.trainer.trainer.Trainer` calls + Override this method to adjust the default way the :class:`~lightning.pytorch.trainer.trainer.Trainer` calls the optimizer. By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example. @@ -1695,7 +1695,7 @@ class LightningModule( Note: - Requires the implementation of the - :meth:`~pytorch_lightning.core.module.LightningModule.forward` method. + :meth:`~lightning.pytorch.core.module.LightningModule.forward` method. - The exported script will be set to evaluation mode. - It is recommended that you install the latest supported version of PyTorch to use this feature without limitations. See also the :mod:`torch.jit` diff --git a/src/pytorch_lightning/core/optimizer.py b/src/lightning/pytorch/core/optimizer.py similarity index 97% rename from src/pytorch_lightning/core/optimizer.py rename to src/lightning/pytorch/core/optimizer.py index bd1c218304..606941a5db 100644 --- a/src/pytorch_lightning/core/optimizer.py +++ b/src/lightning/pytorch/core/optimizer.py @@ -20,12 +20,12 @@ import torch from torch import optim from torch.optim import Optimizer -import pytorch_lightning as pl -from lightning_fabric.utilities.types import _Stateful, Optimizable, ReduceLROnPlateau -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_warn -from pytorch_lightning.utilities.types import LRSchedulerConfig, LRSchedulerTypeTuple +import lightning.pytorch as pl +from lightning.fabric.utilities.types import _Stateful, Optimizable, ReduceLROnPlateau +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import rank_zero_warn +from lightning.pytorch.utilities.types import LRSchedulerConfig, LRSchedulerTypeTuple def do_nothing_closure() -> None: @@ -79,7 +79,7 @@ class LightningOptimizer: Setting `sync_grad` to False will block this synchronization and improve performance. 
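Regarding the TorchScript note above (an implemented ``forward`` is required and the exported script is put in evaluation mode), a small sketch; the model class and the output file name are placeholders::

    import torch

    from my_project import MyModel  # placeholder LightningModule implementing forward()

    model = MyModel()
    script = model.to_torchscript()  # returns a scripted module already in eval mode
    torch.jit.save(script, "model.pt")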
""" # local import here to avoid circular import - from pytorch_lightning.loops.utilities import _block_parallel_sync_behavior + from lightning.pytorch.loops.utilities import _block_parallel_sync_behavior assert self._strategy is not None lightning_module = self._strategy.lightning_module diff --git a/src/pytorch_lightning/core/saving.py b/src/lightning/pytorch/core/saving.py similarity index 96% rename from src/pytorch_lightning/core/saving.py rename to src/lightning/pytorch/core/saving.py index 2b44d74d3d..5473adb267 100644 --- a/src/pytorch_lightning/core/saving.py +++ b/src/lightning/pytorch/core/saving.py @@ -28,15 +28,15 @@ import yaml from lightning_utilities.core.apply_func import apply_to_collection from typing_extensions import Self -import pytorch_lightning as pl -from lightning_fabric.utilities.cloud_io import _load as pl_load -from lightning_fabric.utilities.cloud_io import get_filesystem -from lightning_fabric.utilities.types import _MAP_LOCATION_TYPE, _PATH -from pytorch_lightning.utilities import _OMEGACONF_AVAILABLE -from pytorch_lightning.utilities.migration import pl_legacy_patch -from pytorch_lightning.utilities.migration.utils import _pl_migrate_checkpoint -from pytorch_lightning.utilities.parsing import AttributeDict, parse_class_init_keys -from pytorch_lightning.utilities.rank_zero import rank_zero_warn +import lightning.pytorch as pl +from lightning.fabric.utilities.cloud_io import _load as pl_load +from lightning.fabric.utilities.cloud_io import get_filesystem +from lightning.fabric.utilities.types import _MAP_LOCATION_TYPE, _PATH +from lightning.pytorch.utilities import _OMEGACONF_AVAILABLE +from lightning.pytorch.utilities.migration import pl_legacy_patch +from lightning.pytorch.utilities.migration.utils import _pl_migrate_checkpoint +from lightning.pytorch.utilities.parsing import AttributeDict, parse_class_init_keys +from lightning.pytorch.utilities.rank_zero import rank_zero_warn log = logging.getLogger(__name__) PRIMITIVE_TYPES = (bool, int, float, str) diff --git a/src/pytorch_lightning/demos/__init__.py b/src/lightning/pytorch/demos/__init__.py similarity index 100% rename from src/pytorch_lightning/demos/__init__.py rename to src/lightning/pytorch/demos/__init__.py diff --git a/src/pytorch_lightning/demos/boring_classes.py b/src/lightning/pytorch/demos/boring_classes.py similarity index 96% rename from src/pytorch_lightning/demos/boring_classes.py rename to src/lightning/pytorch/demos/boring_classes.py index d125d494dd..39ef9966c1 100644 --- a/src/pytorch_lightning/demos/boring_classes.py +++ b/src/lightning/pytorch/demos/boring_classes.py @@ -20,10 +20,10 @@ from torch import Tensor from torch.optim import Optimizer from torch.utils.data import DataLoader, Dataset, IterableDataset, Subset -from lightning_fabric.utilities.types import _TORCH_LRSCHEDULER -from pytorch_lightning import LightningDataModule, LightningModule -from pytorch_lightning.core.optimizer import LightningOptimizer -from pytorch_lightning.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT +from lightning.fabric.utilities.types import _TORCH_LRSCHEDULER +from lightning.pytorch import LightningDataModule, LightningModule +from lightning.pytorch.core.optimizer import LightningOptimizer +from lightning.pytorch.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT class RandomDictDataset(Dataset): diff --git a/src/pytorch_lightning/demos/mnist_datamodule.py b/src/lightning/pytorch/demos/mnist_datamodule.py similarity index 98% rename from src/pytorch_lightning/demos/mnist_datamodule.py rename to 
src/lightning/pytorch/demos/mnist_datamodule.py index 254f760db3..992cfefe37 100644 --- a/src/pytorch_lightning/demos/mnist_datamodule.py +++ b/src/lightning/pytorch/demos/mnist_datamodule.py @@ -24,9 +24,9 @@ import torch from torch import Tensor from torch.utils.data import DataLoader, Dataset, random_split -from lightning_fabric.utilities.imports import _IS_WINDOWS -from pytorch_lightning import LightningDataModule -from pytorch_lightning.utilities.imports import _TORCHVISION_AVAILABLE +from lightning.fabric.utilities.imports import _IS_WINDOWS +from lightning.pytorch import LightningDataModule +from lightning.pytorch.utilities.imports import _TORCHVISION_AVAILABLE if _TORCHVISION_AVAILABLE: from torchvision import transforms as transform_lib diff --git a/src/pytorch_lightning/loggers/__init__.py b/src/lightning/pytorch/loggers/__init__.py similarity index 68% rename from src/pytorch_lightning/loggers/__init__.py rename to src/lightning/pytorch/loggers/__init__.py index 49f688f6ac..d79ba62d85 100644 --- a/src/pytorch_lightning/loggers/__init__.py +++ b/src/lightning/pytorch/loggers/__init__.py @@ -13,13 +13,13 @@ # limitations under the License. import os -from pytorch_lightning.loggers.comet import _COMET_AVAILABLE, CometLogger # noqa: F401 -from pytorch_lightning.loggers.csv_logs import CSVLogger -from pytorch_lightning.loggers.logger import Logger -from pytorch_lightning.loggers.mlflow import _MLFLOW_AVAILABLE, MLFlowLogger # noqa: F401 -from pytorch_lightning.loggers.neptune import NeptuneLogger # noqa: F401 -from pytorch_lightning.loggers.tensorboard import TensorBoardLogger -from pytorch_lightning.loggers.wandb import WandbLogger # noqa: F401 +from lightning.pytorch.loggers.comet import _COMET_AVAILABLE, CometLogger # noqa: F401 +from lightning.pytorch.loggers.csv_logs import CSVLogger +from lightning.pytorch.loggers.logger import Logger +from lightning.pytorch.loggers.mlflow import _MLFLOW_AVAILABLE, MLFlowLogger # noqa: F401 +from lightning.pytorch.loggers.neptune import NeptuneLogger # noqa: F401 +from lightning.pytorch.loggers.tensorboard import TensorBoardLogger +from lightning.pytorch.loggers.wandb import WandbLogger # noqa: F401 __all__ = ["CSVLogger", "Logger", "TensorBoardLogger"] diff --git a/src/pytorch_lightning/loggers/comet.py b/src/lightning/pytorch/loggers/comet.py similarity index 95% rename from src/pytorch_lightning/loggers/comet.py rename to src/lightning/pytorch/loggers/comet.py index 6ce96f762d..2a5704220b 100644 --- a/src/pytorch_lightning/loggers/comet.py +++ b/src/lightning/pytorch/loggers/comet.py @@ -25,10 +25,10 @@ from lightning_utilities.core.imports import module_available from torch import Tensor from torch.nn import Module -from lightning_fabric.utilities.logger import _add_prefix, _convert_params, _flatten_dict -from pytorch_lightning.loggers.logger import Logger, rank_zero_experiment -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_zero_only +from lightning.fabric.utilities.logger import _add_prefix, _convert_params, _flatten_dict +from lightning.pytorch.loggers.logger import Logger, rank_zero_experiment +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.rank_zero import rank_zero_only log = logging.getLogger(__name__) _COMET_AVAILABLE = module_available("comet_ml") @@ -54,7 +54,7 @@ else: class CometLogger(Logger): r""" Track your parameters, metrics, source code and more using - `Comet `_. 
+ `Comet `_. Install it with pip: @@ -69,8 +69,8 @@ class CometLogger(Logger): .. code-block:: python import os - from pytorch_lightning import Trainer - from pytorch_lightning.loggers import CometLogger + from lightning.pytorch import Trainer + from lightning.pytorch.loggers import CometLogger # arguments made to CometLogger are passed on to the comet_ml.Experiment class comet_logger = CometLogger( @@ -88,7 +88,7 @@ class CometLogger(Logger): .. code-block:: python - from pytorch_lightning.loggers import CometLogger + from lightning.pytorch.loggers import CometLogger # arguments made to CometLogger are passed on to the comet_ml.Experiment class comet_logger = CometLogger( @@ -102,7 +102,7 @@ class CometLogger(Logger): **Log Hyperparameters:** - Log parameters used to initialize a :class:`~pytorch_lightning.core.module.LightningModule`: + Log parameters used to initialize a :class:`~lightning.pytorch.core.module.LightningModule`: .. code-block:: python @@ -270,7 +270,7 @@ class CometLogger(Logger): def experiment(self) -> Union[CometExperiment, CometExistingExperiment, CometOfflineExperiment]: r""" Actual Comet object. To use Comet features in your - :class:`~pytorch_lightning.core.module.LightningModule` do the following. + :class:`~lightning.pytorch.core.module.LightningModule` do the following. Example:: diff --git a/src/pytorch_lightning/loggers/csv_logs.py b/src/lightning/pytorch/loggers/csv_logs.py similarity index 87% rename from src/pytorch_lightning/loggers/csv_logs.py rename to src/lightning/pytorch/loggers/csv_logs.py index bfa404ef22..9ab29f3432 100644 --- a/src/pytorch_lightning/loggers/csv_logs.py +++ b/src/lightning/pytorch/loggers/csv_logs.py @@ -23,14 +23,14 @@ import os from argparse import Namespace from typing import Any, Dict, Optional, Union -from lightning_fabric.loggers.csv_logs import _ExperimentWriter as _FabricExperimentWriter -from lightning_fabric.loggers.csv_logs import CSVLogger as FabricCSVLogger -from lightning_fabric.loggers.logger import rank_zero_experiment -from lightning_fabric.utilities.logger import _convert_params -from lightning_fabric.utilities.types import _PATH -from pytorch_lightning.core.saving import save_hparams_to_yaml -from pytorch_lightning.loggers.logger import Logger -from pytorch_lightning.utilities.rank_zero import rank_zero_only +from lightning.fabric.loggers.csv_logs import _ExperimentWriter as _FabricExperimentWriter +from lightning.fabric.loggers.csv_logs import CSVLogger as FabricCSVLogger +from lightning.fabric.loggers.logger import rank_zero_experiment +from lightning.fabric.utilities.logger import _convert_params +from lightning.fabric.utilities.types import _PATH +from lightning.pytorch.core.saving import save_hparams_to_yaml +from lightning.pytorch.loggers.logger import Logger +from lightning.pytorch.utilities.rank_zero import rank_zero_only log = logging.getLogger(__name__) @@ -70,8 +70,8 @@ class CSVLogger(Logger, FabricCSVLogger): Logs are saved to ``os.path.join(save_dir, name, version)``. Example: - >>> from pytorch_lightning import Trainer - >>> from pytorch_lightning.loggers import CSVLogger + >>> from lightning.pytorch import Trainer + >>> from lightning.pytorch.loggers import CSVLogger >>> logger = CSVLogger("logs", name="my_exp_name") >>> trainer = Trainer(logger=logger) @@ -144,7 +144,7 @@ class CSVLogger(Logger, FabricCSVLogger): r""" Actual _ExperimentWriter object. To use _ExperimentWriter features in your - :class:`~pytorch_lightning.core.module.LightningModule` do the following. 
+ :class:`~lightning.pytorch.core.module.LightningModule` do the following. Example:: diff --git a/src/pytorch_lightning/loggers/logger.py b/src/lightning/pytorch/loggers/logger.py similarity index 94% rename from src/pytorch_lightning/loggers/logger.py rename to src/lightning/pytorch/loggers/logger.py index 29cf95db33..9ac163ff6b 100644 --- a/src/pytorch_lightning/loggers/logger.py +++ b/src/lightning/pytorch/loggers/logger.py @@ -22,10 +22,10 @@ from typing import Any, Callable, Dict, Mapping, Optional, Sequence import numpy as np -from lightning_fabric.loggers import Logger as FabricLogger -from lightning_fabric.loggers.logger import _DummyExperiment as DummyExperiment # for backward compatibility -from lightning_fabric.loggers.logger import rank_zero_experiment # noqa: F401 # for backward compatibility -from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint +from lightning.fabric.loggers import Logger as FabricLogger +from lightning.fabric.loggers.logger import _DummyExperiment as DummyExperiment # for backward compatibility +from lightning.fabric.loggers.logger import rank_zero_experiment # noqa: F401 # for backward compatibility +from lightning.pytorch.callbacks.model_checkpoint import ModelCheckpoint class Logger(FabricLogger, ABC): diff --git a/src/pytorch_lightning/loggers/mlflow.py b/src/lightning/pytorch/loggers/mlflow.py similarity index 94% rename from src/pytorch_lightning/loggers/mlflow.py rename to src/lightning/pytorch/loggers/mlflow.py index 1226362ab1..cfbc159460 100644 --- a/src/pytorch_lightning/loggers/mlflow.py +++ b/src/lightning/pytorch/loggers/mlflow.py @@ -28,11 +28,11 @@ import yaml from lightning_utilities.core.imports import RequirementCache from torch import Tensor -from lightning_fabric.utilities.logger import _add_prefix, _convert_params, _flatten_dict -from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint -from pytorch_lightning.loggers.logger import Logger, rank_zero_experiment -from pytorch_lightning.utilities.logger import _scan_checkpoints -from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn +from lightning.fabric.utilities.logger import _add_prefix, _convert_params, _flatten_dict +from lightning.pytorch.callbacks.model_checkpoint import ModelCheckpoint +from lightning.pytorch.loggers.logger import Logger, rank_zero_experiment +from lightning.pytorch.utilities.logger import _scan_checkpoints +from lightning.pytorch.utilities.rank_zero import rank_zero_only, rank_zero_warn log = logging.getLogger(__name__) LOCAL_FILE_URI_PREFIX = "file:" @@ -83,17 +83,17 @@ class MLFlowLogger(Logger): .. code-block:: python - from pytorch_lightning import Trainer - from pytorch_lightning.loggers import MLFlowLogger + from lightning.pytorch import Trainer + from lightning.pytorch.loggers import MLFlowLogger mlf_logger = MLFlowLogger(experiment_name="lightning_logs", tracking_uri="file:./ml-runs") trainer = Trainer(logger=mlf_logger) - Use the logger anywhere in your :class:`~pytorch_lightning.core.module.LightningModule` as follows: + Use the logger anywhere in your :class:`~lightning.pytorch.core.module.LightningModule` as follows: .. code-block:: python - from pytorch_lightning import LightningModule + from lightning.pytorch import LightningModule class LitModel(LightningModule): @@ -115,12 +115,12 @@ class MLFlowLogger(Logger): save_dir: A path to a local directory where the MLflow runs get saved. Defaults to `./mlflow` if `tracking_uri` is not provided. Has no effect if `tracking_uri` is provided. 
- log_model: Log checkpoints created by :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` + log_model: Log checkpoints created by :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` as MLFlow artifacts. * if ``log_model == 'all'``, checkpoints are logged during training. * if ``log_model == True``, checkpoints are logged at the end of training, except when - :paramref:`~pytorch_lightning.callbacks.Checkpoint.save_top_k` ``== -1`` + :paramref:`~lightning.pytorch.callbacks.Checkpoint.save_top_k` ``== -1`` which also logs every checkpoint during training. * if ``log_model == False`` (default), no checkpoint is logged. @@ -177,7 +177,7 @@ class MLFlowLogger(Logger): def experiment(self) -> MlflowClient: r""" Actual MLflow object. To use MLflow features in your - :class:`~pytorch_lightning.core.module.LightningModule` do the following. + :class:`~lightning.pytorch.core.module.LightningModule` do the following. Example:: diff --git a/src/pytorch_lightning/loggers/neptune.py b/src/lightning/pytorch/loggers/neptune.py similarity index 96% rename from src/pytorch_lightning/loggers/neptune.py rename to src/lightning/pytorch/loggers/neptune.py index 0616182c99..256409817c 100644 --- a/src/pytorch_lightning/loggers/neptune.py +++ b/src/lightning/pytorch/loggers/neptune.py @@ -27,12 +27,12 @@ from typing import Any, Dict, Generator, List, Optional, Set, Union from lightning_utilities.core.imports import RequirementCache from torch import Tensor -import pytorch_lightning as pl -from lightning_fabric.utilities.logger import _add_prefix, _convert_params, _sanitize_callable_params -from pytorch_lightning.callbacks import Checkpoint -from pytorch_lightning.loggers.logger import Logger, rank_zero_experiment -from pytorch_lightning.utilities.model_summary import ModelSummary -from pytorch_lightning.utilities.rank_zero import rank_zero_only +import lightning.pytorch as pl +from lightning.fabric.utilities.logger import _add_prefix, _convert_params, _sanitize_callable_params +from lightning.pytorch.callbacks import Checkpoint +from lightning.pytorch.loggers.logger import Logger, rank_zero_experiment +from lightning.pytorch.utilities.model_summary import ModelSummary +from lightning.pytorch.utilities.rank_zero import rank_zero_only _NEPTUNE_AVAILABLE = RequirementCache("neptune-client") if _NEPTUNE_AVAILABLE: @@ -70,8 +70,8 @@ class NeptuneLogger(Logger): .. code-block:: python - from pytorch_lightning import Trainer - from pytorch_lightning.loggers import NeptuneLogger + from lightning.pytorch import Trainer + from lightning.pytorch.loggers import NeptuneLogger neptune_logger = NeptuneLogger( api_key="ANONYMOUS", # replace with your own @@ -82,12 +82,12 @@ class NeptuneLogger(Logger): **How to use NeptuneLogger?** - Use the logger anywhere in your :class:`~pytorch_lightning.core.module.LightningModule` as follows: + Use the logger anywhere in your :class:`~lightning.pytorch.core.module.LightningModule` as follows: .. code-block:: python from neptune.new.types import File - from pytorch_lightning import LightningModule + from lightning.pytorch import LightningModule class LitModel(LightningModule): @@ -137,7 +137,7 @@ class NeptuneLogger(Logger): **Log model checkpoints** - If you have :class:`~pytorch_lightning.callbacks.ModelCheckpoint` configured, + If you have :class:`~lightning.pytorch.callbacks.ModelCheckpoint` configured, Neptune logger automatically logs model checkpoints. Model weights will be uploaded to the: "model/checkpoints" namespace in the Neptune Run. 
You can disable this option: @@ -153,8 +153,8 @@ class NeptuneLogger(Logger): .. testcode:: :skipif: not _NEPTUNE_AVAILABLE - from pytorch_lightning import Trainer - from pytorch_lightning.loggers import NeptuneLogger + from lightning.pytorch import Trainer + from lightning.pytorch.loggers import NeptuneLogger neptune_logger = NeptuneLogger( project="common/pytorch-lightning-integration", @@ -326,7 +326,7 @@ class NeptuneLogger(Logger): def experiment(self) -> Run: r""" Actual Neptune run object. Allows you to use neptune logging features in your - :class:`~pytorch_lightning.core.module.LightningModule`. + :class:`~lightning.pytorch.core.module.LightningModule`. Example:: @@ -382,7 +382,7 @@ class NeptuneLogger(Logger): Example:: - from pytorch_lightning.loggers import NeptuneLogger + from lightning.pytorch.loggers import NeptuneLogger PARAMS = { "batch_size": 64, diff --git a/src/pytorch_lightning/loggers/tensorboard.py b/src/lightning/pytorch/loggers/tensorboard.py similarity index 93% rename from src/pytorch_lightning/loggers/tensorboard.py rename to src/lightning/pytorch/loggers/tensorboard.py index 25fb0d2617..9857144bcd 100644 --- a/src/pytorch_lightning/loggers/tensorboard.py +++ b/src/lightning/pytorch/loggers/tensorboard.py @@ -23,16 +23,16 @@ from typing import Any, Dict, Optional, Union from torch import Tensor -import pytorch_lightning as pl -from lightning_fabric.loggers.tensorboard import _TENSORBOARD_AVAILABLE, _TENSORBOARDX_AVAILABLE -from lightning_fabric.loggers.tensorboard import TensorBoardLogger as FabricTensorBoardLogger -from lightning_fabric.utilities.logger import _convert_params -from lightning_fabric.utilities.types import _PATH -from pytorch_lightning.callbacks import ModelCheckpoint -from pytorch_lightning.core.saving import save_hparams_to_yaml -from pytorch_lightning.loggers.logger import Logger -from pytorch_lightning.utilities.imports import _OMEGACONF_AVAILABLE -from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn +import lightning.pytorch as pl +from lightning.fabric.loggers.tensorboard import _TENSORBOARD_AVAILABLE, _TENSORBOARDX_AVAILABLE +from lightning.fabric.loggers.tensorboard import TensorBoardLogger as FabricTensorBoardLogger +from lightning.fabric.utilities.logger import _convert_params +from lightning.fabric.utilities.types import _PATH +from lightning.pytorch.callbacks import ModelCheckpoint +from lightning.pytorch.core.saving import save_hparams_to_yaml +from lightning.pytorch.loggers.logger import Logger +from lightning.pytorch.utilities.imports import _OMEGACONF_AVAILABLE +from lightning.pytorch.utilities.rank_zero import rank_zero_only, rank_zero_warn log = logging.getLogger(__name__) @@ -56,8 +56,8 @@ class TensorBoardLogger(Logger, FabricTensorBoardLogger): .. 
testcode:: - from pytorch_lightning import Trainer - from pytorch_lightning.loggers import TensorBoardLogger + from lightning.pytorch import Trainer + from lightning.pytorch.loggers import TensorBoardLogger logger = TensorBoardLogger("tb_logs", name="my_model") trainer = Trainer(logger=logger) diff --git a/src/pytorch_lightning/loggers/wandb.py b/src/lightning/pytorch/loggers/wandb.py similarity index 95% rename from src/pytorch_lightning/loggers/wandb.py rename to src/lightning/pytorch/loggers/wandb.py index f2fc0dcb58..0519f6c44b 100644 --- a/src/pytorch_lightning/loggers/wandb.py +++ b/src/lightning/pytorch/loggers/wandb.py @@ -24,13 +24,13 @@ import torch.nn as nn from lightning_utilities.core.imports import RequirementCache from torch import Tensor -from lightning_fabric.utilities.logger import _add_prefix, _convert_params, _flatten_dict, _sanitize_callable_params -from lightning_fabric.utilities.types import _PATH -from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint -from pytorch_lightning.loggers.logger import Logger, rank_zero_experiment -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.logger import _scan_checkpoints -from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn +from lightning.fabric.utilities.logger import _add_prefix, _convert_params, _flatten_dict, _sanitize_callable_params +from lightning.fabric.utilities.types import _PATH +from lightning.pytorch.callbacks.model_checkpoint import ModelCheckpoint +from lightning.pytorch.loggers.logger import Logger, rank_zero_experiment +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.logger import _scan_checkpoints +from lightning.pytorch.utilities.rank_zero import rank_zero_only, rank_zero_warn try: import wandb @@ -61,7 +61,7 @@ class WandbLogger(Logger): .. code-block:: python - from pytorch_lightning.loggers import WandbLogger + from lightning.pytorch.loggers import WandbLogger wandb_logger = WandbLogger(project="MNIST") @@ -75,7 +75,7 @@ class WandbLogger(Logger): **Log metrics** - Log from :class:`~pytorch_lightning.core.module.LightningModule`: + Log from :class:`~lightning.pytorch.core.module.LightningModule`: .. code-block:: python @@ -91,7 +91,7 @@ class WandbLogger(Logger): **Log hyper-parameters** - Save :class:`~pytorch_lightning.core.module.LightningModule` parameters: + Save :class:`~lightning.pytorch.core.module.LightningModule` parameters: .. code-block:: python @@ -151,7 +151,7 @@ class WandbLogger(Logger): wandb_logger = WandbLogger(log_model="all") - Custom checkpointing can be set up through :class:`~pytorch_lightning.callbacks.ModelCheckpoint`: + Custom checkpointing can be set up through :class:`~lightning.pytorch.callbacks.ModelCheckpoint`: .. code-block:: python @@ -227,7 +227,7 @@ class WandbLogger(Logger): .. code-block:: python - from pytorch_lightning.loggers import WandbLogger + from lightning.pytorch.loggers import WandbLogger artifact_dir = WandbLogger.download_artifact(artifact="path/to/artifact") @@ -244,7 +244,7 @@ class WandbLogger(Logger): .. code-block:: python - from pytorch_lightning.loggers import WandbLogger + from lightning.pytorch.loggers import WandbLogger wandb_logger = WandbLogger(project="my_project", name="my_run") wandb_logger.use_artifact(artifact="path/to/artifact") @@ -262,12 +262,12 @@ class WandbLogger(Logger): id: Same as version. anonymous: Enables or explicitly disables anonymous logging. 
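``WandbLogger`` likewise only changes its import root. A short sketch combining the metric- and hyper-parameter-logging snippets from the docstring above (the project name and hyper-parameter values are placeholders):

.. code-block:: python

    from lightning.pytorch import Trainer
    from lightning.pytorch.loggers import WandbLogger

    wandb_logger = WandbLogger(project="MNIST", log_model="all")

    # record hyper-parameters once; metrics are logged from any LightningModule via self.log(...)
    wandb_logger.experiment.config.update({"lr": 1e-3, "batch_size": 64})

    trainer = Trainer(logger=wandb_logger)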
project: The name of the project to which this run will belong. - log_model: Log checkpoints created by :class:`~pytorch_lightning.callbacks.ModelCheckpoint` + log_model: Log checkpoints created by :class:`~lightning.pytorch.callbacks.ModelCheckpoint` as W&B artifacts. `latest` and `best` aliases are automatically set. * if ``log_model == 'all'``, checkpoints are logged during training. * if ``log_model == True``, checkpoints are logged at the end of training, except when - :paramref:`~pytorch_lightning.callbacks.ModelCheckpoint.save_top_k` ``== -1`` + :paramref:`~lightning.pytorch.callbacks.ModelCheckpoint.save_top_k` ``== -1`` which also logs every checkpoint during training. * if ``log_model == False`` (default), no checkpoint is logged. @@ -376,7 +376,7 @@ class WandbLogger(Logger): r""" Actual wandb object. To use wandb features in your - :class:`~pytorch_lightning.core.module.LightningModule` do the following. + :class:`~lightning.pytorch.core.module.LightningModule` do the following. Example:: diff --git a/src/pytorch_lightning/loops/__init__.py b/src/lightning/pytorch/loops/__init__.py similarity index 70% rename from src/pytorch_lightning/loops/__init__.py rename to src/lightning/pytorch/loops/__init__.py index 246d53bd47..5c4443dbac 100644 --- a/src/pytorch_lightning/loops/__init__.py +++ b/src/lightning/pytorch/loops/__init__.py @@ -11,8 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from pytorch_lightning.loops.loop import _Loop # noqa: F401 isort: skip (avoids circular imports) -from pytorch_lightning.loops.dataloader import _DataLoaderLoop, _EvaluationLoop, _PredictionLoop # noqa: F401 -from pytorch_lightning.loops.epoch import _EvaluationEpochLoop, _PredictionEpochLoop, _TrainingEpochLoop # noqa: F401 -from pytorch_lightning.loops.fit_loop import _FitLoop # noqa: F401 -from pytorch_lightning.loops.optimization import _ManualOptimization, _OptimizerLoop # noqa: F401 +from lightning.pytorch.loops.loop import _Loop # noqa: F401 isort: skip (avoids circular imports) +from lightning.pytorch.loops.dataloader import _DataLoaderLoop, _EvaluationLoop, _PredictionLoop # noqa: F401 +from lightning.pytorch.loops.epoch import _EvaluationEpochLoop, _PredictionEpochLoop, _TrainingEpochLoop # noqa: F401 +from lightning.pytorch.loops.fit_loop import _FitLoop # noqa: F401 +from lightning.pytorch.loops.optimization import _ManualOptimization, _OptimizerLoop # noqa: F401 diff --git a/src/pytorch_lightning/loops/dataloader/__init__.py b/src/lightning/pytorch/loops/dataloader/__init__.py similarity index 77% rename from src/pytorch_lightning/loops/dataloader/__init__.py rename to src/lightning/pytorch/loops/dataloader/__init__.py index a0bbac8e01..f37a5bbd8c 100644 --- a/src/pytorch_lightning/loops/dataloader/__init__.py +++ b/src/lightning/pytorch/loops/dataloader/__init__.py @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from pytorch_lightning.loops.dataloader.dataloader_loop import _DataLoaderLoop # noqa: F401 -from pytorch_lightning.loops.dataloader.evaluation_loop import _EvaluationLoop # noqa: F401 -from pytorch_lightning.loops.dataloader.prediction_loop import _PredictionLoop # noqa: F401 +from lightning.pytorch.loops.dataloader.dataloader_loop import _DataLoaderLoop # noqa: F401 +from lightning.pytorch.loops.dataloader.evaluation_loop import _EvaluationLoop # noqa: F401 +from lightning.pytorch.loops.dataloader.prediction_loop import _PredictionLoop # noqa: F401 diff --git a/src/pytorch_lightning/loops/dataloader/dataloader_loop.py b/src/lightning/pytorch/loops/dataloader/dataloader_loop.py similarity index 95% rename from src/pytorch_lightning/loops/dataloader/dataloader_loop.py rename to src/lightning/pytorch/loops/dataloader/dataloader_loop.py index 5037e97111..18c24cc713 100644 --- a/src/pytorch_lightning/loops/dataloader/dataloader_loop.py +++ b/src/lightning/pytorch/loops/dataloader/dataloader_loop.py @@ -17,8 +17,8 @@ from typing import Sequence from torch.utils.data import DataLoader -from pytorch_lightning.loops.loop import _Loop -from pytorch_lightning.loops.progress import DataLoaderProgress +from lightning.pytorch.loops.loop import _Loop +from lightning.pytorch.loops.progress import DataLoaderProgress class _DataLoaderLoop(_Loop): diff --git a/src/pytorch_lightning/loops/dataloader/evaluation_loop.py b/src/lightning/pytorch/loops/dataloader/evaluation_loop.py similarity index 96% rename from src/pytorch_lightning/loops/dataloader/evaluation_loop.py rename to src/lightning/pytorch/loops/dataloader/evaluation_loop.py index cac778fdfc..9542f52f3e 100644 --- a/src/pytorch_lightning/loops/dataloader/evaluation_loop.py +++ b/src/lightning/pytorch/loops/dataloader/evaluation_loop.py @@ -21,17 +21,17 @@ from lightning_utilities.core.apply_func import apply_to_collection from torch import Tensor from torch.utils.data.dataloader import DataLoader -import pytorch_lightning as pl -from pytorch_lightning.callbacks.progress.rich_progress import _RICH_AVAILABLE -from pytorch_lightning.loops.dataloader import _DataLoaderLoop -from pytorch_lightning.loops.epoch import _EvaluationEpochLoop -from pytorch_lightning.loops.utilities import _set_sampler_epoch -from pytorch_lightning.trainer.connectors.logger_connector.result import _OUT_DICT, _ResultCollection -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataFetcher, DataLoaderIterDataFetcher -from pytorch_lightning.utilities.rank_zero import rank_zero_warn -from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature -from pytorch_lightning.utilities.types import EPOCH_OUTPUT +import lightning.pytorch as pl +from lightning.pytorch.callbacks.progress.rich_progress import _RICH_AVAILABLE +from lightning.pytorch.loops.dataloader import _DataLoaderLoop +from lightning.pytorch.loops.epoch import _EvaluationEpochLoop +from lightning.pytorch.loops.utilities import _set_sampler_epoch +from lightning.pytorch.trainer.connectors.logger_connector.result import _OUT_DICT, _ResultCollection +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities.fetching import AbstractDataFetcher, DataFetcher, DataLoaderIterDataFetcher +from lightning.pytorch.utilities.rank_zero import rank_zero_warn +from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature +from lightning.pytorch.utilities.types import EPOCH_OUTPUT if 
_RICH_AVAILABLE: from rich import get_console diff --git a/src/pytorch_lightning/loops/dataloader/prediction_loop.py b/src/lightning/pytorch/loops/dataloader/prediction_loop.py similarity index 94% rename from src/pytorch_lightning/loops/dataloader/prediction_loop.py rename to src/lightning/pytorch/loops/dataloader/prediction_loop.py index e779b13f2e..19b03686f7 100644 --- a/src/pytorch_lightning/loops/dataloader/prediction_loop.py +++ b/src/lightning/pytorch/loops/dataloader/prediction_loop.py @@ -2,12 +2,12 @@ from typing import Any, List, Optional, Sequence, Union from torch.utils.data import DataLoader -from pytorch_lightning.loops.dataloader.dataloader_loop import _DataLoaderLoop -from pytorch_lightning.loops.epoch.prediction_epoch_loop import _PredictionEpochLoop -from pytorch_lightning.loops.utilities import _set_sampler_epoch -from pytorch_lightning.strategies import DDPSpawnStrategy -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.types import _PREDICT_OUTPUT +from lightning.pytorch.loops.dataloader.dataloader_loop import _DataLoaderLoop +from lightning.pytorch.loops.epoch.prediction_epoch_loop import _PredictionEpochLoop +from lightning.pytorch.loops.utilities import _set_sampler_epoch +from lightning.pytorch.strategies import DDPSpawnStrategy +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.types import _PREDICT_OUTPUT class _PredictionLoop(_DataLoaderLoop): diff --git a/src/pytorch_lightning/loops/epoch/__init__.py b/src/lightning/pytorch/loops/epoch/__init__.py similarity index 77% rename from src/pytorch_lightning/loops/epoch/__init__.py rename to src/lightning/pytorch/loops/epoch/__init__.py index 0289626db9..c21eeda802 100644 --- a/src/pytorch_lightning/loops/epoch/__init__.py +++ b/src/lightning/pytorch/loops/epoch/__init__.py @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from pytorch_lightning.loops.epoch.evaluation_epoch_loop import _EvaluationEpochLoop # noqa: F401 -from pytorch_lightning.loops.epoch.prediction_epoch_loop import _PredictionEpochLoop # noqa: F401 -from pytorch_lightning.loops.epoch.training_epoch_loop import _TrainingEpochLoop # noqa: F401 +from lightning.pytorch.loops.epoch.evaluation_epoch_loop import _EvaluationEpochLoop # noqa: F401 +from lightning.pytorch.loops.epoch.prediction_epoch_loop import _PredictionEpochLoop # noqa: F401 +from lightning.pytorch.loops.epoch.training_epoch_loop import _TrainingEpochLoop # noqa: F401 diff --git a/src/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py b/src/lightning/pytorch/loops/epoch/evaluation_epoch_loop.py similarity index 96% rename from src/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py rename to src/lightning/pytorch/loops/epoch/evaluation_epoch_loop.py index 58547d6a44..c74b3f84c2 100644 --- a/src/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py +++ b/src/lightning/pytorch/loops/epoch/evaluation_epoch_loop.py @@ -16,13 +16,13 @@ from collections import OrderedDict from functools import lru_cache from typing import Any, Optional, Union -from pytorch_lightning.loops.loop import _Loop -from pytorch_lightning.loops.progress import BatchProgress -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities.exceptions import SIGTERMException -from pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT +from lightning.pytorch.loops.loop import _Loop +from lightning.pytorch.loops.progress import BatchProgress +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities.exceptions import SIGTERMException +from lightning.pytorch.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT class _EvaluationEpochLoop(_Loop): diff --git a/src/pytorch_lightning/loops/epoch/prediction_epoch_loop.py b/src/lightning/pytorch/loops/epoch/prediction_epoch_loop.py similarity index 95% rename from src/pytorch_lightning/loops/epoch/prediction_epoch_loop.py rename to src/lightning/pytorch/loops/epoch/prediction_epoch_loop.py index 84c87083f6..ef55c7b674 100644 --- a/src/pytorch_lightning/loops/epoch/prediction_epoch_loop.py +++ b/src/lightning/pytorch/loops/epoch/prediction_epoch_loop.py @@ -3,11 +3,11 @@ from typing import Any, Dict, Iterator, List, Tuple, Union import torch -from lightning_fabric.utilities import move_data_to_device -from pytorch_lightning.loops.loop import _Loop -from pytorch_lightning.loops.progress import Progress -from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper -from pytorch_lightning.utilities.rank_zero import WarningCache +from lightning.fabric.utilities import move_data_to_device +from lightning.pytorch.loops.loop import _Loop +from lightning.pytorch.loops.progress import Progress +from lightning.pytorch.overrides.distributed import IndexBatchSamplerWrapper +from lightning.pytorch.utilities.rank_zero import WarningCache warning_cache = WarningCache() @@ -168,7 +168,7 @@ class _PredictionEpochLoop(_Loop): def _get_batch_indices(self, dataloader_idx: int) -> List[List[int]]: """Returns a reference to the seen batch indices if the dataloader has a batch 
sampler wrapped by our - :class:`~pytorch_lightning.overrides.distributed.IndexBatchSamplerWrapper`.""" + :class:`~lightning.pytorch.overrides.distributed.IndexBatchSamplerWrapper`.""" # the batch_sampler is not be defined in case of CombinedDataLoaders assert self.trainer.predict_dataloaders batch_sampler = getattr( diff --git a/src/pytorch_lightning/loops/epoch/training_epoch_loop.py b/src/lightning/pytorch/loops/epoch/training_epoch_loop.py similarity index 95% rename from src/pytorch_lightning/loops/epoch/training_epoch_loop.py rename to src/lightning/pytorch/loops/epoch/training_epoch_loop.py index a334e2672e..80af832451 100644 --- a/src/pytorch_lightning/loops/epoch/training_epoch_loop.py +++ b/src/lightning/pytorch/loops/epoch/training_epoch_loop.py @@ -17,18 +17,18 @@ from typing import Any, Dict, List, Optional, Union import torch -from pytorch_lightning import loops # import as loops to avoid circular imports -from pytorch_lightning.loops.optimization import _ManualOptimization, _OptimizerLoop -from pytorch_lightning.loops.optimization.manual_loop import _OUTPUTS_TYPE as _MANUAL_LOOP_OUTPUTS_TYPE -from pytorch_lightning.loops.optimization.optimizer_loop import _OUTPUTS_TYPE as _OPTIMIZER_LOOP_OUTPUTS_TYPE -from pytorch_lightning.loops.progress import BatchProgress, SchedulerProgress -from pytorch_lightning.loops.utilities import _is_max_limit_reached -from pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection -from pytorch_lightning.utilities.exceptions import MisconfigurationException, SIGTERMException -from pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_warn, WarningCache -from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature +from lightning.pytorch import loops # import as loops to avoid circular imports +from lightning.pytorch.loops.optimization import _ManualOptimization, _OptimizerLoop +from lightning.pytorch.loops.optimization.manual_loop import _OUTPUTS_TYPE as _MANUAL_LOOP_OUTPUTS_TYPE +from lightning.pytorch.loops.optimization.optimizer_loop import _OUTPUTS_TYPE as _OPTIMIZER_LOOP_OUTPUTS_TYPE +from lightning.pytorch.loops.progress import BatchProgress, SchedulerProgress +from lightning.pytorch.loops.utilities import _is_max_limit_reached +from lightning.pytorch.trainer.connectors.logger_connector.result import _ResultCollection +from lightning.pytorch.utilities.exceptions import MisconfigurationException, SIGTERMException +from lightning.pytorch.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import rank_zero_warn, WarningCache +from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature _BATCH_OUTPUTS_TYPE = Optional[Union[_OPTIMIZER_LOOP_OUTPUTS_TYPE, _MANUAL_LOOP_OUTPUTS_TYPE]] _OUTPUTS_TYPE = List[_BATCH_OUTPUTS_TYPE] @@ -37,19 +37,19 @@ _OUTPUTS_TYPE = List[_BATCH_OUTPUTS_TYPE] class _TrainingEpochLoop(loops._Loop): """ Iterates over all batches in the dataloader (one epoch) that the user returns in their - :meth:`~pytorch_lightning.core.module.LightningModule.train_dataloader` method. + :meth:`~lightning.pytorch.core.module.LightningModule.train_dataloader` method. 
Its main responsibilities are calling the ``*_epoch_{start,end}`` hooks, accumulating outputs if the user request them in one of these hooks, and running validation at the requested interval. The validation is carried out by yet another loop, - :class:`~pytorch_lightning.loops.epoch.validation_epoch_loop.ValidationEpochLoop`. + :class:`~lightning.pytorch.loops.epoch.validation_epoch_loop.ValidationEpochLoop`. In the ``run()`` method, the training epoch loop could in theory simply call the ``LightningModule.training_step`` already and perform the optimization. However, Lightning has built-in support for automatic optimization with multiple optimizers. For this reason there are actually two more loops nested under - :class:`~pytorch_lightning.loops.epoch.training_epoch_loop.TrainingEpochLoop`. + :class:`~lightning.pytorch.loops.epoch.training_epoch_loop.TrainingEpochLoop`. Args: min_steps: The minimum number of steps (batches) to process diff --git a/src/pytorch_lightning/loops/fit_loop.py b/src/lightning/pytorch/loops/fit_loop.py similarity index 95% rename from src/pytorch_lightning/loops/fit_loop.py rename to src/lightning/pytorch/loops/fit_loop.py index 9fc3ac78a4..06140dfaa0 100644 --- a/src/pytorch_lightning/loops/fit_loop.py +++ b/src/lightning/pytorch/loops/fit_loop.py @@ -14,19 +14,19 @@ import logging from typing import Any, Optional, Type -import pytorch_lightning as pl -from pytorch_lightning.loops import _Loop -from pytorch_lightning.loops.epoch import _TrainingEpochLoop -from pytorch_lightning.loops.epoch.training_epoch_loop import _OUTPUTS_TYPE as _EPOCH_OUTPUTS_TYPE -from pytorch_lightning.loops.progress import Progress -from pytorch_lightning.loops.utilities import _is_max_limit_reached, _set_sampler_epoch -from pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection -from pytorch_lightning.trainer.supporters import CombinedLoader -from pytorch_lightning.utilities.exceptions import MisconfigurationException, SIGTERMException -from pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataFetcher, DataLoaderIterDataFetcher -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_debug, rank_zero_info, rank_zero_warn -from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature +import lightning.pytorch as pl +from lightning.pytorch.loops import _Loop +from lightning.pytorch.loops.epoch import _TrainingEpochLoop +from lightning.pytorch.loops.epoch.training_epoch_loop import _OUTPUTS_TYPE as _EPOCH_OUTPUTS_TYPE +from lightning.pytorch.loops.progress import Progress +from lightning.pytorch.loops.utilities import _is_max_limit_reached, _set_sampler_epoch +from lightning.pytorch.trainer.connectors.logger_connector.result import _ResultCollection +from lightning.pytorch.trainer.supporters import CombinedLoader +from lightning.pytorch.utilities.exceptions import MisconfigurationException, SIGTERMException +from lightning.pytorch.utilities.fetching import AbstractDataFetcher, DataFetcher, DataLoaderIterDataFetcher +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_info, rank_zero_warn +from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature log = logging.getLogger(__name__) diff --git a/src/pytorch_lightning/loops/loop.py b/src/lightning/pytorch/loops/loop.py similarity index 97% rename from src/pytorch_lightning/loops/loop.py 
rename to src/lightning/pytorch/loops/loop.py index 766a0de1d5..974358a105 100644 --- a/src/pytorch_lightning/loops/loop.py +++ b/src/lightning/pytorch/loops/loop.py @@ -13,8 +13,8 @@ # limitations under the License. from typing import Dict, Optional -import pytorch_lightning as pl -from pytorch_lightning.loops.progress import BaseProgress +import lightning.pytorch as pl +from lightning.pytorch.loops.progress import BaseProgress class _Loop: diff --git a/src/pytorch_lightning/loops/optimization/__init__.py b/src/lightning/pytorch/loops/optimization/__init__.py similarity index 83% rename from src/pytorch_lightning/loops/optimization/__init__.py rename to src/lightning/pytorch/loops/optimization/__init__.py index 5e150c8fa1..5fe6bf5260 100644 --- a/src/pytorch_lightning/loops/optimization/__init__.py +++ b/src/lightning/pytorch/loops/optimization/__init__.py @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -from pytorch_lightning.loops.optimization.manual_loop import _ManualOptimization # noqa: F401 -from pytorch_lightning.loops.optimization.optimizer_loop import _OptimizerLoop # noqa: F401 +from lightning.pytorch.loops.optimization.manual_loop import _ManualOptimization # noqa: F401 +from lightning.pytorch.loops.optimization.optimizer_loop import _OptimizerLoop # noqa: F401 diff --git a/src/pytorch_lightning/loops/optimization/closure.py b/src/lightning/pytorch/loops/optimization/closure.py similarity index 97% rename from src/pytorch_lightning/loops/optimization/closure.py rename to src/lightning/pytorch/loops/optimization/closure.py index 54d9fab22d..ee643857a4 100644 --- a/src/pytorch_lightning/loops/optimization/closure.py +++ b/src/lightning/pytorch/loops/optimization/closure.py @@ -15,7 +15,7 @@ from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Any, Dict, Generic, Optional, TypeVar -from pytorch_lightning.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.exceptions import MisconfigurationException T = TypeVar("T") diff --git a/src/pytorch_lightning/loops/optimization/manual_loop.py b/src/lightning/pytorch/loops/optimization/manual_loop.py similarity index 90% rename from src/pytorch_lightning/loops/optimization/manual_loop.py rename to src/lightning/pytorch/loops/optimization/manual_loop.py index 0f38873324..50728ee166 100644 --- a/src/pytorch_lightning/loops/optimization/manual_loop.py +++ b/src/lightning/pytorch/loops/optimization/manual_loop.py @@ -18,19 +18,19 @@ from typing import Any, Dict, Optional from torch import Tensor -from pytorch_lightning.core.optimizer import do_nothing_closure -from pytorch_lightning.loops import _Loop -from pytorch_lightning.loops.optimization.closure import OutputResult -from pytorch_lightning.loops.progress import Progress, ReadyCompletedTracker -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.types import STEP_OUTPUT +from lightning.pytorch.core.optimizer import do_nothing_closure +from lightning.pytorch.loops import _Loop +from lightning.pytorch.loops.optimization.closure import OutputResult +from lightning.pytorch.loops.progress import Progress, ReadyCompletedTracker +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.types import STEP_OUTPUT @dataclass class ManualResult(OutputResult): """A container to hold the result returned by the ``ManualLoop``. 
- It is created from the output of :meth:`~pytorch_lightning.core.module.LightningModule.training_step`. + It is created from the output of :meth:`~lightning.pytorch.core.module.LightningModule.training_step`. Attributes: extra: Anything returned by the ``training_step``. @@ -66,11 +66,11 @@ _OUTPUTS_TYPE = Dict[str, Any] class _ManualOptimization(_Loop): """A special loop implementing what is known in Lightning as Manual Optimization where the optimization happens - entirely in the :meth:`~pytorch_lightning.core.module.LightningModule.training_step` and therefore the user is + entirely in the :meth:`~lightning.pytorch.core.module.LightningModule.training_step` and therefore the user is responsible for back-propagating gradients and making calls to the optimizers. This loop is a trivial case because it performs only a single iteration (calling directly into the module's - :meth:`~pytorch_lightning.core.module.LightningModule.training_step`) and passing through the output(s). + :meth:`~lightning.pytorch.core.module.LightningModule.training_step`) and passing through the output(s). """ output_result_cls = ManualResult diff --git a/src/pytorch_lightning/loops/optimization/optimizer_loop.py b/src/lightning/pytorch/loops/optimization/optimizer_loop.py similarity index 95% rename from src/pytorch_lightning/loops/optimization/optimizer_loop.py rename to src/lightning/pytorch/loops/optimization/optimizer_loop.py index 686ed36239..4cc25f8649 100644 --- a/src/pytorch_lightning/loops/optimization/optimizer_loop.py +++ b/src/lightning/pytorch/loops/optimization/optimizer_loop.py @@ -19,21 +19,21 @@ import torch from torch import Tensor from torch.optim import Optimizer -from pytorch_lightning.core.optimizer import LightningOptimizer -from pytorch_lightning.loops.loop import _Loop -from pytorch_lightning.loops.optimization.closure import AbstractClosure, OutputResult -from pytorch_lightning.loops.progress import OptimizationProgress -from pytorch_lightning.loops.utilities import _block_parallel_sync_behavior -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import WarningCache -from pytorch_lightning.utilities.types import STEP_OUTPUT +from lightning.pytorch.core.optimizer import LightningOptimizer +from lightning.pytorch.loops.loop import _Loop +from lightning.pytorch.loops.optimization.closure import AbstractClosure, OutputResult +from lightning.pytorch.loops.progress import OptimizationProgress +from lightning.pytorch.loops.utilities import _block_parallel_sync_behavior +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.rank_zero import WarningCache +from lightning.pytorch.utilities.types import STEP_OUTPUT @dataclass class ClosureResult(OutputResult): """A container to hold the result of a :class:`Closure` call. - It is created from the output of :meth:`~pytorch_lightning.core.module.LightningModule.training_step`. + It is created from the output of :meth:`~lightning.pytorch.core.module.LightningModule.training_step`. Attributes: closure_loss: The loss with a graph attached. @@ -95,7 +95,7 @@ class Closure(AbstractClosure[ClosureResult]): do something with the output. 
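The ``_ManualOptimization`` docstring above spells out the user-facing contract: with automatic optimization disabled, ``training_step`` owns backpropagation and the optimizer calls. A minimal sketch under the renamed namespace (the linear layer and placeholder loss are illustrative assumptions):

.. code-block:: python

    import torch
    from lightning.pytorch import LightningModule


    class ManualOptModel(LightningModule):
        def __init__(self):
            super().__init__()
            self.automatic_optimization = False  # hand control to training_step
            self.layer = torch.nn.Linear(32, 2)

        def training_step(self, batch, batch_idx):
            opt = self.optimizers()          # LightningOptimizer wrapper
            opt.zero_grad()
            loss = self.layer(batch).sum()   # placeholder loss
            self.manual_backward(loss)       # instead of calling loss.backward() directly
            opt.step()

        def configure_optimizers(self):
            return torch.optim.SGD(self.parameters(), lr=0.1)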
Args: - step_fn: This is typically the :meth:`pytorch_lightning.core.module.LightningModule.training_step + step_fn: This is typically the :meth:`lightning.pytorch.core.module.LightningModule.training_step wrapped with processing for its outputs backward_fn: A function that takes a loss value as input, performs back-propagation and returns the loss value. Can be set to ``None`` to skip the backward operation. diff --git a/src/pytorch_lightning/loops/progress.py b/src/lightning/pytorch/loops/progress.py similarity index 100% rename from src/pytorch_lightning/loops/progress.py rename to src/lightning/pytorch/loops/progress.py diff --git a/src/pytorch_lightning/loops/utilities.py b/src/lightning/pytorch/loops/utilities.py similarity index 88% rename from src/pytorch_lightning/loops/utilities.py rename to src/lightning/pytorch/loops/utilities.py index c8d61fc357..d2a7c863e7 100644 --- a/src/pytorch_lightning/loops/utilities.py +++ b/src/lightning/pytorch/loops/utilities.py @@ -18,15 +18,15 @@ import torch from torch import Tensor from torch.utils.data import DataLoader -import pytorch_lightning as pl -from lightning_fabric.utilities.warnings import PossibleUserWarning -from pytorch_lightning.callbacks.timer import Timer -from pytorch_lightning.loops import _Loop -from pytorch_lightning.loops.progress import BaseProgress -from pytorch_lightning.strategies.parallel import ParallelStrategy -from pytorch_lightning.strategies.strategy import Strategy -from pytorch_lightning.trainer.supporters import CombinedLoader -from pytorch_lightning.utilities.rank_zero import rank_zero_warn +import lightning.pytorch as pl +from lightning.fabric.utilities.warnings import PossibleUserWarning +from lightning.pytorch.callbacks.timer import Timer +from lightning.pytorch.loops import _Loop +from lightning.pytorch.loops.progress import BaseProgress +from lightning.pytorch.strategies.parallel import ParallelStrategy +from lightning.pytorch.strategies.strategy import Strategy +from lightning.pytorch.trainer.supporters import CombinedLoader +from lightning.pytorch.utilities.rank_zero import rank_zero_warn def check_finite_loss(loss: Optional[Tensor]) -> None: @@ -83,7 +83,7 @@ def _parse_loop_limits( @contextmanager def _block_parallel_sync_behavior(strategy: Strategy, block: bool = True) -> Generator[None, None, None]: - """Blocks synchronization in :class:`~pytorch_lightning.strategies.parallel.ParallelStrategy`. This is useful + """Blocks synchronization in :class:`~lightning.pytorch.strategies.parallel.ParallelStrategy`. This is useful for example when accumulating gradients to reduce communication when it is not needed. 
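``_block_parallel_sync_behavior`` exists so that the all-reduce of gradients can be skipped on accumulation steps; the user-facing switch is simply ``accumulate_grad_batches``. A hedged usage sketch (device count and accumulation factor are arbitrary):

.. code-block:: python

    from lightning.pytorch import Trainer

    # gradients are accumulated over 4 batches; synchronisation in the parallel
    # strategy is blocked on the first 3 and only all-reduced on the 4th
    trainer = Trainer(accumulate_grad_batches=4, strategy="ddp", devices=2)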
Args: diff --git a/src/pytorch_lightning/overrides/__init__.py b/src/lightning/pytorch/overrides/__init__.py similarity index 100% rename from src/pytorch_lightning/overrides/__init__.py rename to src/lightning/pytorch/overrides/__init__.py diff --git a/src/pytorch_lightning/overrides/base.py b/src/lightning/pytorch/overrides/base.py similarity index 98% rename from src/pytorch_lightning/overrides/base.py rename to src/lightning/pytorch/overrides/base.py index a7375afc79..36568929b3 100644 --- a/src/pytorch_lightning/overrides/base.py +++ b/src/lightning/pytorch/overrides/base.py @@ -15,8 +15,8 @@ from typing import Any, Union import torch -import pytorch_lightning as pl -from lightning_fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin +import lightning.pytorch as pl +from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin class _LightningPrecisionModuleWrapperBase(_DeviceDtypeModuleMixin, torch.nn.Module): diff --git a/src/pytorch_lightning/overrides/data_parallel.py b/src/lightning/pytorch/overrides/data_parallel.py similarity index 96% rename from src/pytorch_lightning/overrides/data_parallel.py rename to src/lightning/pytorch/overrides/data_parallel.py index b63ccdc8a7..1a89715e33 100644 --- a/src/pytorch_lightning/overrides/data_parallel.py +++ b/src/lightning/pytorch/overrides/data_parallel.py @@ -19,9 +19,9 @@ import torch from lightning_utilities.core.apply_func import apply_to_collection from torch import Tensor -import pytorch_lightning as pl -from pytorch_lightning.overrides.base import _LightningModuleWrapperBase, _LightningPrecisionModuleWrapperBase -from pytorch_lightning.utilities.rank_zero import rank_zero_warn +import lightning.pytorch as pl +from lightning.pytorch.overrides.base import _LightningModuleWrapperBase, _LightningPrecisionModuleWrapperBase +from lightning.pytorch.utilities.rank_zero import rank_zero_warn def _ignore_scalar_return_in_dp() -> None: diff --git a/src/pytorch_lightning/overrides/distributed.py b/src/lightning/pytorch/overrides/distributed.py similarity index 98% rename from src/pytorch_lightning/overrides/distributed.py rename to src/lightning/pytorch/overrides/distributed.py index f130880d58..019cde8a07 100644 --- a/src/pytorch_lightning/overrides/distributed.py +++ b/src/lightning/pytorch/overrides/distributed.py @@ -19,7 +19,7 @@ from torch import Tensor from torch.nn.parallel import DistributedDataParallel from torch.utils.data import BatchSampler, DistributedSampler, Sampler -from lightning_fabric.utilities.distributed import _DatasetSamplerWrapper +from lightning.fabric.utilities.distributed import _DatasetSamplerWrapper def _find_tensors( diff --git a/src/pytorch_lightning/overrides/torch_distributed.py b/src/lightning/pytorch/overrides/torch_distributed.py similarity index 100% rename from src/pytorch_lightning/overrides/torch_distributed.py rename to src/lightning/pytorch/overrides/torch_distributed.py diff --git a/src/lightning/pytorch/plugins/__init__.py b/src/lightning/pytorch/plugins/__init__.py new file mode 100644 index 0000000000..5bb9eddc1c --- /dev/null +++ b/src/lightning/pytorch/plugins/__init__.py @@ -0,0 +1,39 @@ +from typing import Union + +from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment, TorchCheckpointIO, XLACheckpointIO +from lightning.pytorch.plugins.io.async_plugin import AsyncCheckpointIO +from lightning.pytorch.plugins.io.hpu_plugin import HPUCheckpointIO +from lightning.pytorch.plugins.layer_sync import LayerSync, TorchSyncBatchNorm +from 
lightning.pytorch.plugins.precision.amp import MixedPrecisionPlugin +from lightning.pytorch.plugins.precision.colossalai import ColossalAIPrecisionPlugin +from lightning.pytorch.plugins.precision.deepspeed import DeepSpeedPrecisionPlugin +from lightning.pytorch.plugins.precision.double import DoublePrecisionPlugin +from lightning.pytorch.plugins.precision.fsdp import FSDPMixedPrecisionPlugin +from lightning.pytorch.plugins.precision.hpu import HPUPrecisionPlugin +from lightning.pytorch.plugins.precision.ipu import IPUPrecisionPlugin +from lightning.pytorch.plugins.precision.precision_plugin import PrecisionPlugin +from lightning.pytorch.plugins.precision.tpu import TPUPrecisionPlugin +from lightning.pytorch.plugins.precision.tpu_bf16 import TPUBf16PrecisionPlugin + +PLUGIN = Union[PrecisionPlugin, ClusterEnvironment, CheckpointIO, LayerSync] +PLUGIN_INPUT = Union[PLUGIN, str] + +__all__ = [ + "AsyncCheckpointIO", + "CheckpointIO", + "TorchCheckpointIO", + "XLACheckpointIO", + "HPUCheckpointIO", + "ColossalAIPrecisionPlugin", + "DeepSpeedPrecisionPlugin", + "DoublePrecisionPlugin", + "IPUPrecisionPlugin", + "HPUPrecisionPlugin", + "MixedPrecisionPlugin", + "PrecisionPlugin", + "FSDPMixedPrecisionPlugin", + "TPUPrecisionPlugin", + "TPUBf16PrecisionPlugin", + "LayerSync", + "TorchSyncBatchNorm", +] diff --git a/src/pytorch_lightning/plugins/environments/__init__.py b/src/lightning/pytorch/plugins/environments/__init__.py similarity index 79% rename from src/pytorch_lightning/plugins/environments/__init__.py rename to src/lightning/pytorch/plugins/environments/__init__.py index 0d6d6936f0..38ad63f754 100644 --- a/src/pytorch_lightning/plugins/environments/__init__.py +++ b/src/lightning/pytorch/plugins/environments/__init__.py @@ -11,8 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
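The new ``plugins/__init__.py`` above re-exports the plugin classes and defines ``PLUGIN_INPUT``, the union accepted by ``Trainer(plugins=...)``. A hedged sketch wiring one of the re-exported classes into a ``Trainer``:

.. code-block:: python

    from lightning.pytorch import Trainer
    from lightning.pytorch.plugins import AsyncCheckpointIO

    # checkpoints are saved on a background thread instead of blocking the training loop
    trainer = Trainer(plugins=[AsyncCheckpointIO()])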
-from lightning_fabric.plugins import ClusterEnvironment # noqa: F401 -from lightning_fabric.plugins.environments import ( # noqa: F401 +from lightning.fabric.plugins import ClusterEnvironment # noqa: F401 +from lightning.fabric.plugins.environments import ( # noqa: F401 KubeflowEnvironment, LightningEnvironment, LSFEnvironment, @@ -20,4 +20,4 @@ from lightning_fabric.plugins.environments import ( # noqa: F401 TorchElasticEnvironment, XLAEnvironment, ) -from pytorch_lightning.plugins.environments.bagua_environment import BaguaEnvironment # noqa: F401 +from lightning.pytorch.plugins.environments.bagua_environment import BaguaEnvironment # noqa: F401 diff --git a/src/pytorch_lightning/plugins/environments/bagua_environment.py b/src/lightning/pytorch/plugins/environments/bagua_environment.py similarity index 97% rename from src/pytorch_lightning/plugins/environments/bagua_environment.py rename to src/lightning/pytorch/plugins/environments/bagua_environment.py index c64d75da9d..77339df0d6 100644 --- a/src/pytorch_lightning/plugins/environments/bagua_environment.py +++ b/src/lightning/pytorch/plugins/environments/bagua_environment.py @@ -15,7 +15,7 @@ import logging import os -from lightning_fabric.plugins import ClusterEnvironment +from lightning.fabric.plugins import ClusterEnvironment log = logging.getLogger(__name__) diff --git a/src/pytorch_lightning/plugins/io/__init__.py b/src/lightning/pytorch/plugins/io/__init__.py similarity index 78% rename from src/pytorch_lightning/plugins/io/__init__.py rename to src/lightning/pytorch/plugins/io/__init__.py index 7bf5d36d44..af07ea1c20 100644 --- a/src/pytorch_lightning/plugins/io/__init__.py +++ b/src/lightning/pytorch/plugins/io/__init__.py @@ -11,8 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from lightning_fabric.plugins import CheckpointIO, TorchCheckpointIO, XLACheckpointIO -from pytorch_lightning.plugins.io.async_plugin import AsyncCheckpointIO -from pytorch_lightning.plugins.io.hpu_plugin import HPUCheckpointIO +from lightning.fabric.plugins import CheckpointIO, TorchCheckpointIO, XLACheckpointIO +from lightning.pytorch.plugins.io.async_plugin import AsyncCheckpointIO +from lightning.pytorch.plugins.io.hpu_plugin import HPUCheckpointIO __all__ = ["AsyncCheckpointIO", "CheckpointIO", "HPUCheckpointIO", "TorchCheckpointIO", "XLACheckpointIO"] diff --git a/src/pytorch_lightning/plugins/io/async_plugin.py b/src/lightning/pytorch/plugins/io/async_plugin.py similarity index 95% rename from src/pytorch_lightning/plugins/io/async_plugin.py rename to src/lightning/pytorch/plugins/io/async_plugin.py index e464d4e352..29d917e151 100644 --- a/src/pytorch_lightning/plugins/io/async_plugin.py +++ b/src/lightning/pytorch/plugins/io/async_plugin.py @@ -15,8 +15,8 @@ from concurrent.futures import ThreadPoolExecutor from typing import Any, Optional -from lightning_fabric.plugins import CheckpointIO -from pytorch_lightning.plugins.io.wrapper import _WrappingCheckpointIO +from lightning.fabric.plugins import CheckpointIO +from lightning.pytorch.plugins.io.wrapper import _WrappingCheckpointIO class AsyncCheckpointIO(_WrappingCheckpointIO): diff --git a/src/pytorch_lightning/plugins/io/checkpoint_plugin.py b/src/lightning/pytorch/plugins/io/checkpoint_plugin.py similarity index 90% rename from src/pytorch_lightning/plugins/io/checkpoint_plugin.py rename to src/lightning/pytorch/plugins/io/checkpoint_plugin.py index 7e216dc650..20b7804486 100644 --- a/src/pytorch_lightning/plugins/io/checkpoint_plugin.py +++ b/src/lightning/pytorch/plugins/io/checkpoint_plugin.py @@ -13,4 +13,4 @@ # limitations under the License. # For backward-compatibility -from lightning_fabric.plugins import CheckpointIO # noqa: F401 +from lightning.fabric.plugins import CheckpointIO # noqa: F401 diff --git a/src/pytorch_lightning/plugins/io/hpu_plugin.py b/src/lightning/pytorch/plugins/io/hpu_plugin.py similarity index 89% rename from src/pytorch_lightning/plugins/io/hpu_plugin.py rename to src/lightning/pytorch/plugins/io/hpu_plugin.py index 7616ee1ad8..b800303398 100644 --- a/src/pytorch_lightning/plugins/io/hpu_plugin.py +++ b/src/lightning/pytorch/plugins/io/hpu_plugin.py @@ -17,10 +17,10 @@ from typing import Any, Dict, Optional import torch -from lightning_fabric.plugins import TorchCheckpointIO -from lightning_fabric.utilities import move_data_to_device -from lightning_fabric.utilities.cloud_io import _atomic_save, get_filesystem -from lightning_fabric.utilities.types import _PATH +from lightning.fabric.plugins import TorchCheckpointIO +from lightning.fabric.utilities import move_data_to_device +from lightning.fabric.utilities.cloud_io import _atomic_save, get_filesystem +from lightning.fabric.utilities.types import _PATH class HPUCheckpointIO(TorchCheckpointIO): diff --git a/src/pytorch_lightning/plugins/io/torch_plugin.py b/src/lightning/pytorch/plugins/io/torch_plugin.py similarity index 90% rename from src/pytorch_lightning/plugins/io/torch_plugin.py rename to src/lightning/pytorch/plugins/io/torch_plugin.py index 2a64b9664c..2f1c18c8b5 100644 --- a/src/pytorch_lightning/plugins/io/torch_plugin.py +++ b/src/lightning/pytorch/plugins/io/torch_plugin.py @@ -13,4 +13,4 @@ # limitations under the License. 
# For backward-compatibility -from lightning_fabric.plugins import TorchCheckpointIO # noqa: F401 +from lightning.fabric.plugins import TorchCheckpointIO # noqa: F401 diff --git a/src/pytorch_lightning/plugins/io/wrapper.py b/src/lightning/pytorch/plugins/io/wrapper.py similarity index 98% rename from src/pytorch_lightning/plugins/io/wrapper.py rename to src/lightning/pytorch/plugins/io/wrapper.py index edc5d0eb8d..fbd709a05e 100644 --- a/src/pytorch_lightning/plugins/io/wrapper.py +++ b/src/lightning/pytorch/plugins/io/wrapper.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Any, Dict, Optional -from lightning_fabric.plugins import CheckpointIO +from lightning.fabric.plugins import CheckpointIO class _WrappingCheckpointIO(CheckpointIO): diff --git a/src/pytorch_lightning/plugins/io/xla_plugin.py b/src/lightning/pytorch/plugins/io/xla_plugin.py similarity index 90% rename from src/pytorch_lightning/plugins/io/xla_plugin.py rename to src/lightning/pytorch/plugins/io/xla_plugin.py index 74fa927248..ce97b7d0c0 100644 --- a/src/pytorch_lightning/plugins/io/xla_plugin.py +++ b/src/lightning/pytorch/plugins/io/xla_plugin.py @@ -13,4 +13,4 @@ # limitations under the License. # For backward-compatibility -from lightning_fabric.plugins import XLACheckpointIO # noqa: F401 +from lightning.fabric.plugins import XLACheckpointIO # noqa: F401 diff --git a/src/pytorch_lightning/plugins/layer_sync.py b/src/lightning/pytorch/plugins/layer_sync.py similarity index 100% rename from src/pytorch_lightning/plugins/layer_sync.py rename to src/lightning/pytorch/plugins/layer_sync.py diff --git a/src/pytorch_lightning/plugins/precision/__init__.py b/src/lightning/pytorch/plugins/precision/__init__.py similarity index 61% rename from src/pytorch_lightning/plugins/precision/__init__.py rename to src/lightning/pytorch/plugins/precision/__init__.py index 5440c8d345..3053444fcf 100644 --- a/src/pytorch_lightning/plugins/precision/__init__.py +++ b/src/lightning/pytorch/plugins/precision/__init__.py @@ -11,16 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from pytorch_lightning.plugins.precision.amp import MixedPrecisionPlugin -from pytorch_lightning.plugins.precision.colossalai import ColossalAIPrecisionPlugin -from pytorch_lightning.plugins.precision.deepspeed import DeepSpeedPrecisionPlugin -from pytorch_lightning.plugins.precision.double import DoublePrecisionPlugin -from pytorch_lightning.plugins.precision.fsdp import FSDPMixedPrecisionPlugin -from pytorch_lightning.plugins.precision.hpu import HPUPrecisionPlugin -from pytorch_lightning.plugins.precision.ipu import IPUPrecisionPlugin -from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin -from pytorch_lightning.plugins.precision.tpu import TPUPrecisionPlugin -from pytorch_lightning.plugins.precision.tpu_bf16 import TPUBf16PrecisionPlugin +from lightning.pytorch.plugins.precision.amp import MixedPrecisionPlugin +from lightning.pytorch.plugins.precision.colossalai import ColossalAIPrecisionPlugin +from lightning.pytorch.plugins.precision.deepspeed import DeepSpeedPrecisionPlugin +from lightning.pytorch.plugins.precision.double import DoublePrecisionPlugin +from lightning.pytorch.plugins.precision.fsdp import FSDPMixedPrecisionPlugin +from lightning.pytorch.plugins.precision.hpu import HPUPrecisionPlugin +from lightning.pytorch.plugins.precision.ipu import IPUPrecisionPlugin +from lightning.pytorch.plugins.precision.precision_plugin import PrecisionPlugin +from lightning.pytorch.plugins.precision.tpu import TPUPrecisionPlugin +from lightning.pytorch.plugins.precision.tpu_bf16 import TPUBf16PrecisionPlugin __all__ = [ "ColossalAIPrecisionPlugin", diff --git a/src/pytorch_lightning/plugins/precision/amp.py b/src/lightning/pytorch/plugins/precision/amp.py similarity index 94% rename from src/pytorch_lightning/plugins/precision/amp.py rename to src/lightning/pytorch/plugins/precision/amp.py index f5262a5c7c..ce984070ae 100644 --- a/src/pytorch_lightning/plugins/precision/amp.py +++ b/src/lightning/pytorch/plugins/precision/amp.py @@ -16,12 +16,12 @@ import torch from torch import Tensor from torch.optim import LBFGS, Optimizer -import pytorch_lightning as pl -from lightning_fabric.accelerators.cuda import _patch_cuda_is_available -from lightning_fabric.utilities.types import Optimizable -from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin -from pytorch_lightning.utilities import GradClipAlgorithmType -from pytorch_lightning.utilities.exceptions import MisconfigurationException +import lightning.pytorch as pl +from lightning.fabric.accelerators.cuda import _patch_cuda_is_available +from lightning.fabric.utilities.types import Optimizable +from lightning.pytorch.plugins.precision.precision_plugin import PrecisionPlugin +from lightning.pytorch.utilities import GradClipAlgorithmType +from lightning.pytorch.utilities.exceptions import MisconfigurationException class MixedPrecisionPlugin(PrecisionPlugin): diff --git a/src/pytorch_lightning/plugins/precision/colossalai.py b/src/lightning/pytorch/plugins/precision/colossalai.py similarity index 93% rename from src/pytorch_lightning/plugins/precision/colossalai.py rename to src/lightning/pytorch/plugins/precision/colossalai.py index 1bb8a95416..db31cc7109 100644 --- a/src/pytorch_lightning/plugins/precision/colossalai.py +++ b/src/lightning/pytorch/plugins/precision/colossalai.py @@ -16,10 +16,10 @@ from typing import Any, Callable, cast, Literal, Optional, Union from torch import Tensor from torch.optim import Optimizer -import pytorch_lightning as pl -from lightning_fabric.utilities.types 
import Steppable -from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin -from pytorch_lightning.utilities.rank_zero import WarningCache +import lightning.pytorch as pl +from lightning.fabric.utilities.types import Steppable +from lightning.pytorch.plugins.precision.precision_plugin import PrecisionPlugin +from lightning.pytorch.utilities.rank_zero import WarningCache warning_cache = WarningCache() diff --git a/src/pytorch_lightning/plugins/precision/deepspeed.py b/src/lightning/pytorch/plugins/precision/deepspeed.py similarity index 90% rename from src/pytorch_lightning/plugins/precision/deepspeed.py rename to src/lightning/pytorch/plugins/precision/deepspeed.py index d2a824a3fb..b6ab1c8477 100644 --- a/src/pytorch_lightning/plugins/precision/deepspeed.py +++ b/src/lightning/pytorch/plugins/precision/deepspeed.py @@ -17,14 +17,14 @@ from torch import Tensor from torch.optim import LBFGS, Optimizer from typing_extensions import get_args -import pytorch_lightning as pl -from lightning_fabric.strategies.deepspeed import _DEEPSPEED_AVAILABLE -from lightning_fabric.utilities.types import Steppable -from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin -from pytorch_lightning.utilities import GradClipAlgorithmType -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import WarningCache +import lightning.pytorch as pl +from lightning.fabric.strategies.deepspeed import _DEEPSPEED_AVAILABLE +from lightning.fabric.utilities.types import Steppable +from lightning.pytorch.plugins.precision.precision_plugin import PrecisionPlugin +from lightning.pytorch.utilities import GradClipAlgorithmType +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import WarningCache if TYPE_CHECKING and _DEEPSPEED_AVAILABLE: import deepspeed diff --git a/src/pytorch_lightning/plugins/precision/double.py b/src/lightning/pytorch/plugins/precision/double.py similarity index 94% rename from src/pytorch_lightning/plugins/precision/double.py rename to src/lightning/pytorch/plugins/precision/double.py index fd8136972e..a31842cb32 100644 --- a/src/pytorch_lightning/plugins/precision/double.py +++ b/src/lightning/pytorch/plugins/precision/double.py @@ -20,10 +20,10 @@ from lightning_utilities.core.apply_func import apply_to_collection from torch import FloatTensor, Tensor from torch.optim import Optimizer -import pytorch_lightning as pl -from lightning_fabric.plugins.precision.utils import _convert_fp_tensor -from pytorch_lightning.overrides.base import _LightningPrecisionModuleWrapperBase -from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin +import lightning.pytorch as pl +from lightning.fabric.plugins.precision.utils import _convert_fp_tensor +from lightning.pytorch.overrides.base import _LightningPrecisionModuleWrapperBase +from lightning.pytorch.plugins.precision.precision_plugin import PrecisionPlugin class LightningDoublePrecisionModule(_LightningPrecisionModuleWrapperBase): diff --git a/src/pytorch_lightning/plugins/precision/fsdp.py b/src/lightning/pytorch/plugins/precision/fsdp.py similarity index 93% rename from src/pytorch_lightning/plugins/precision/fsdp.py rename to src/lightning/pytorch/plugins/precision/fsdp.py index 26c3d371ad..3c2ee2d384 100644 --- 
a/src/pytorch_lightning/plugins/precision/fsdp.py +++ b/src/lightning/pytorch/plugins/precision/fsdp.py @@ -15,9 +15,9 @@ from typing import Any, Literal, Optional import torch -from lightning_fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_12 -from pytorch_lightning.plugins.precision.amp import MixedPrecisionPlugin -from pytorch_lightning.utilities.exceptions import MisconfigurationException +from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_12 +from lightning.pytorch.plugins.precision.amp import MixedPrecisionPlugin +from lightning.pytorch.utilities.exceptions import MisconfigurationException if _TORCH_GREATER_EQUAL_1_12 and torch.distributed.is_available(): from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision diff --git a/src/pytorch_lightning/plugins/precision/hpu.py b/src/lightning/pytorch/plugins/precision/hpu.py similarity index 92% rename from src/pytorch_lightning/plugins/precision/hpu.py rename to src/lightning/pytorch/plugins/precision/hpu.py index 29b00f1214..fa8ec1454b 100644 --- a/src/pytorch_lightning/plugins/precision/hpu.py +++ b/src/lightning/pytorch/plugins/precision/hpu.py @@ -15,9 +15,9 @@ from typing import cast, Literal, Optional, Union from typing_extensions import get_args -from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.imports import _HPU_AVAILABLE +from lightning.pytorch.plugins.precision.precision_plugin import PrecisionPlugin +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.imports import _HPU_AVAILABLE if _HPU_AVAILABLE: from habana_frameworks.torch.hpex import hmp diff --git a/src/pytorch_lightning/plugins/precision/ipu.py b/src/lightning/pytorch/plugins/precision/ipu.py similarity index 89% rename from src/pytorch_lightning/plugins/precision/ipu.py rename to src/lightning/pytorch/plugins/precision/ipu.py index 632474ec84..4502922710 100644 --- a/src/pytorch_lightning/plugins/precision/ipu.py +++ b/src/lightning/pytorch/plugins/precision/ipu.py @@ -17,13 +17,13 @@ from torch import Tensor from torch.optim import LBFGS, Optimizer from typing_extensions import get_args -import pytorch_lightning as pl -from lightning_fabric.utilities.types import Optimizable -from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin -from pytorch_lightning.utilities import GradClipAlgorithmType -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import WarningCache +import lightning.pytorch as pl +from lightning.fabric.utilities.types import Optimizable +from lightning.pytorch.plugins.precision.precision_plugin import PrecisionPlugin +from lightning.pytorch.utilities import GradClipAlgorithmType +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import WarningCache warning_cache = WarningCache() diff --git a/src/pytorch_lightning/plugins/precision/precision_plugin.py b/src/lightning/pytorch/plugins/precision/precision_plugin.py similarity index 96% rename from src/pytorch_lightning/plugins/precision/precision_plugin.py rename to src/lightning/pytorch/plugins/precision/precision_plugin.py index 2afabc6acf..55fe1111c7 100644 --- 
a/src/pytorch_lightning/plugins/precision/precision_plugin.py +++ b/src/lightning/pytorch/plugins/precision/precision_plugin.py @@ -20,11 +20,11 @@ from torch import Tensor from torch.nn import Module from torch.optim import Optimizer -import pytorch_lightning as pl -from lightning_fabric.plugins import Precision as FabricPrecision -from lightning_fabric.utilities.types import Steppable -from pytorch_lightning.core.hooks import CheckpointHooks -from pytorch_lightning.utilities import grad_norm, GradClipAlgorithmType +import lightning.pytorch as pl +from lightning.fabric.plugins import Precision as FabricPrecision +from lightning.fabric.utilities.types import Steppable +from lightning.pytorch.core.hooks import CheckpointHooks +from lightning.pytorch.utilities import grad_norm, GradClipAlgorithmType class PrecisionPlugin(FabricPrecision, CheckpointHooks): diff --git a/src/pytorch_lightning/plugins/precision/tpu.py b/src/lightning/pytorch/plugins/precision/tpu.py similarity index 89% rename from src/pytorch_lightning/plugins/precision/tpu.py rename to src/lightning/pytorch/plugins/precision/tpu.py index d8b96274de..308e77ad8b 100644 --- a/src/pytorch_lightning/plugins/precision/tpu.py +++ b/src/lightning/pytorch/plugins/precision/tpu.py @@ -14,11 +14,11 @@ from functools import partial from typing import Any, Callable -import pytorch_lightning as pl -from lightning_fabric.accelerators.tpu import _XLA_AVAILABLE -from lightning_fabric.utilities.types import Optimizable -from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin -from pytorch_lightning.utilities.exceptions import MisconfigurationException +import lightning.pytorch as pl +from lightning.fabric.accelerators.tpu import _XLA_AVAILABLE +from lightning.fabric.utilities.types import Optimizable +from lightning.pytorch.plugins.precision.precision_plugin import PrecisionPlugin +from lightning.pytorch.utilities.exceptions import MisconfigurationException class TPUPrecisionPlugin(PrecisionPlugin): diff --git a/src/pytorch_lightning/plugins/precision/tpu_bf16.py b/src/lightning/pytorch/plugins/precision/tpu_bf16.py similarity index 94% rename from src/pytorch_lightning/plugins/precision/tpu_bf16.py rename to src/lightning/pytorch/plugins/precision/tpu_bf16.py index 2160c0f7c7..ed7cb8e6b9 100644 --- a/src/pytorch_lightning/plugins/precision/tpu_bf16.py +++ b/src/lightning/pytorch/plugins/precision/tpu_bf16.py @@ -17,7 +17,7 @@ from typing import Any, List, Literal, Tuple import torch.nn as nn from torch.optim import Optimizer -from pytorch_lightning.plugins.precision import TPUPrecisionPlugin +from lightning.pytorch.plugins.precision import TPUPrecisionPlugin class TPUBf16PrecisionPlugin(TPUPrecisionPlugin): diff --git a/src/pytorch_lightning/profilers/__init__.py b/src/lightning/pytorch/profilers/__init__.py similarity index 66% rename from src/pytorch_lightning/profilers/__init__.py rename to src/lightning/pytorch/profilers/__init__.py index e2cf2bf8ae..f18277caf8 100644 --- a/src/pytorch_lightning/profilers/__init__.py +++ b/src/lightning/pytorch/profilers/__init__.py @@ -11,12 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from pytorch_lightning.profilers.advanced import AdvancedProfiler -from pytorch_lightning.profilers.base import PassThroughProfiler -from pytorch_lightning.profilers.profiler import Profiler -from pytorch_lightning.profilers.pytorch import PyTorchProfiler -from pytorch_lightning.profilers.simple import SimpleProfiler -from pytorch_lightning.profilers.xla import XLAProfiler +from lightning.pytorch.profilers.advanced import AdvancedProfiler +from lightning.pytorch.profilers.base import PassThroughProfiler +from lightning.pytorch.profilers.profiler import Profiler +from lightning.pytorch.profilers.pytorch import PyTorchProfiler +from lightning.pytorch.profilers.simple import SimpleProfiler +from lightning.pytorch.profilers.xla import XLAProfiler __all__ = [ "Profiler", diff --git a/src/pytorch_lightning/profilers/advanced.py b/src/lightning/pytorch/profilers/advanced.py similarity index 95% rename from src/pytorch_lightning/profilers/advanced.py rename to src/lightning/pytorch/profilers/advanced.py index 9541d68417..e877f9b502 100644 --- a/src/pytorch_lightning/profilers/advanced.py +++ b/src/lightning/pytorch/profilers/advanced.py @@ -19,7 +19,7 @@ import pstats from pathlib import Path from typing import Dict, Optional, Tuple, Union -from pytorch_lightning.profilers.profiler import Profiler +from lightning.pytorch.profilers.profiler import Profiler log = logging.getLogger(__name__) @@ -40,7 +40,7 @@ class AdvancedProfiler(Profiler): """ Args: dirpath: Directory path for the ``filename``. If ``dirpath`` is ``None`` but ``filename`` is present, the - ``trainer.log_dir`` (from :class:`~pytorch_lightning.loggers.tensorboard.TensorBoardLogger`) + ``trainer.log_dir`` (from :class:`~lightning.pytorch.loggers.tensorboard.TensorBoardLogger`) will be used. filename: If present, filename where the profiler results will be saved instead of printing to stdout. diff --git a/src/pytorch_lightning/profilers/base.py b/src/lightning/pytorch/profilers/base.py similarity index 94% rename from src/pytorch_lightning/profilers/base.py rename to src/lightning/pytorch/profilers/base.py index b728b0c827..4faf4c9a49 100644 --- a/src/pytorch_lightning/profilers/base.py +++ b/src/lightning/pytorch/profilers/base.py @@ -13,7 +13,7 @@ # limitations under the License. 
"""Profiler to check if there are any bottlenecks in your code.""" -from pytorch_lightning.profilers.profiler import Profiler +from lightning.pytorch.profilers.profiler import Profiler class PassThroughProfiler(Profiler): diff --git a/src/pytorch_lightning/profilers/profiler.py b/src/lightning/pytorch/profilers/profiler.py similarity index 98% rename from src/pytorch_lightning/profilers/profiler.py rename to src/lightning/pytorch/profilers/profiler.py index 595f1a324e..90423b0006 100644 --- a/src/pytorch_lightning/profilers/profiler.py +++ b/src/lightning/pytorch/profilers/profiler.py @@ -19,7 +19,7 @@ from contextlib import contextmanager from pathlib import Path from typing import Any, Callable, Dict, Generator, Optional, TextIO, Union -from lightning_fabric.utilities.cloud_io import get_filesystem +from lightning.fabric.utilities.cloud_io import get_filesystem log = logging.getLogger(__name__) diff --git a/src/pytorch_lightning/profilers/pytorch.py b/src/lightning/pytorch/profilers/pytorch.py similarity index 97% rename from src/pytorch_lightning/profilers/pytorch.py rename to src/lightning/pytorch/profilers/pytorch.py index 15eb76af49..19c6572696 100644 --- a/src/pytorch_lightning/profilers/pytorch.py +++ b/src/lightning/pytorch/profilers/pytorch.py @@ -23,17 +23,17 @@ import torch from torch import nn, Tensor from torch.autograd.profiler import record_function -from lightning_fabric.accelerators.cuda import is_cuda_available -from pytorch_lightning.profilers.profiler import Profiler -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.imports import _KINETO_AVAILABLE -from pytorch_lightning.utilities.rank_zero import rank_zero_warn, WarningCache +from lightning.fabric.accelerators.cuda import is_cuda_available +from lightning.pytorch.profilers.profiler import Profiler +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.imports import _KINETO_AVAILABLE +from lightning.pytorch.utilities.rank_zero import rank_zero_warn, WarningCache if TYPE_CHECKING: from torch.autograd.profiler import EventList from torch.utils.hooks import RemovableHandle - from pytorch_lightning.core.module import LightningModule + from lightning.pytorch.core.module import LightningModule if _KINETO_AVAILABLE: from torch.profiler import ProfilerAction, ProfilerActivity, tensorboard_trace_handler @@ -51,14 +51,14 @@ class RegisterRecordFunction: The Lightning PyTorch Profiler will activate this feature automatically. It can be deactivated as follows: Example:: - from pytorch_lightning.profilers import PyTorchProfiler + from lightning.pytorch.profilers import PyTorchProfiler profiler = PyTorchProfiler(record_module_names=False) Trainer(profiler=profiler) It can be used outside of Lightning as follows: Example:: - from pytorch_lightning import Trainer, seed_everything + from lightning.pytorch import Trainer, seed_everything with RegisterRecordFunction(model): out = model(batch) """ @@ -248,7 +248,7 @@ class PyTorchProfiler(Profiler): Args: dirpath: Directory path for the ``filename``. If ``dirpath`` is ``None`` but ``filename`` is present, the - ``trainer.log_dir`` (from :class:`~pytorch_lightning.loggers.tensorboard.TensorBoardLogger`) + ``trainer.log_dir`` (from :class:`~lightning.pytorch.loggers.tensorboard.TensorBoardLogger`) will be used. filename: If present, filename where the profiler results will be saved instead of printing to stdout. 
diff --git a/src/pytorch_lightning/profilers/simple.py b/src/lightning/pytorch/profilers/simple.py similarity index 97% rename from src/pytorch_lightning/profilers/simple.py rename to src/lightning/pytorch/profilers/simple.py index dc2b831fa8..1bc66c47e1 100644 --- a/src/pytorch_lightning/profilers/simple.py +++ b/src/lightning/pytorch/profilers/simple.py @@ -21,7 +21,7 @@ from typing import Dict, List, Optional, Tuple, Union import numpy as np -from pytorch_lightning.profilers.profiler import Profiler +from lightning.pytorch.profilers.profiler import Profiler log = logging.getLogger(__name__) @@ -44,7 +44,7 @@ class SimpleProfiler(Profiler): """ Args: dirpath: Directory path for the ``filename``. If ``dirpath`` is ``None`` but ``filename`` is present, the - ``trainer.log_dir`` (from :class:`~pytorch_lightning.loggers.tensorboard.TensorBoardLogger`) + ``trainer.log_dir`` (from :class:`~lightning.pytorch.loggers.tensorboard.TensorBoardLogger`) will be used. filename: If present, filename where the profiler results will be saved instead of printing to stdout. diff --git a/src/pytorch_lightning/profilers/xla.py b/src/lightning/pytorch/profilers/xla.py similarity index 95% rename from src/pytorch_lightning/profilers/xla.py rename to src/lightning/pytorch/profilers/xla.py index 06b22d9843..a36a11d49c 100644 --- a/src/pytorch_lightning/profilers/xla.py +++ b/src/lightning/pytorch/profilers/xla.py @@ -14,8 +14,8 @@ import logging from typing import Dict -from lightning_fabric.accelerators.tpu import _XLA_AVAILABLE -from pytorch_lightning.profilers.profiler import Profiler +from lightning.fabric.accelerators.tpu import _XLA_AVAILABLE +from lightning.pytorch.profilers.profiler import Profiler log = logging.getLogger(__name__) diff --git a/src/lightning/pytorch/serve/__init__.py b/src/lightning/pytorch/serve/__init__.py new file mode 100644 index 0000000000..5f27380cf1 --- /dev/null +++ b/src/lightning/pytorch/serve/__init__.py @@ -0,0 +1,4 @@ +from lightning.pytorch.serve.servable_module import ServableModule +from lightning.pytorch.serve.servable_module_validator import ServableModuleValidator + +__all__ = ["ServableModuleValidator", "ServableModule"] diff --git a/src/pytorch_lightning/serve/servable_module.py b/src/lightning/pytorch/serve/servable_module.py similarity index 94% rename from src/pytorch_lightning/serve/servable_module.py rename to src/lightning/pytorch/serve/servable_module.py index 1ceb42777e..cfb7957cf3 100644 --- a/src/pytorch_lightning/serve/servable_module.py +++ b/src/lightning/pytorch/serve/servable_module.py @@ -20,9 +20,9 @@ class ServableModule(torch.nn.Module): import torch - from pytorch_lightning import Trainer - from pytorch_lightning.demos.boring_classes import BoringModel - from pytorch_lightning.serve.servable_module_validator import ServableModule, ServableModuleValidator + from lightning.pytorch import Trainer + from lightning.pytorch.demos.boring_classes import BoringModel + from lightning.pytorch.serve.servable_module_validator import ServableModule, ServableModuleValidator class ServableBoringModel(BoringModel, ServableModule): diff --git a/src/pytorch_lightning/serve/servable_module_validator.py b/src/lightning/pytorch/serve/servable_module_validator.py similarity index 94% rename from src/pytorch_lightning/serve/servable_module_validator.py rename to src/lightning/pytorch/serve/servable_module_validator.py index c52858e664..7951280bb5 100644 --- a/src/pytorch_lightning/serve/servable_module_validator.py +++ 
b/src/lightning/pytorch/serve/servable_module_validator.py @@ -7,13 +7,13 @@ import requests import torch from lightning_utilities.core.imports import RequirementCache -import pytorch_lightning as pl -from pytorch_lightning.callbacks import Callback -from pytorch_lightning.serve.servable_module import ServableModule -from pytorch_lightning.strategies import DeepSpeedStrategy, FSDPStrategy -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_only +import lightning.pytorch as pl +from lightning.pytorch.callbacks import Callback +from lightning.pytorch.serve.servable_module import ServableModule +from lightning.pytorch.strategies import DeepSpeedStrategy, FSDPStrategy +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import rank_zero_only _NOT_SUPPORTED_STRATEGIES = ( DeepSpeedStrategy, diff --git a/src/lightning/pytorch/strategies/__init__.py b/src/lightning/pytorch/strategies/__init__.py new file mode 100644 index 0000000000..c02c207f3d --- /dev/null +++ b/src/lightning/pytorch/strategies/__init__.py @@ -0,0 +1,34 @@ +# Copyright The Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from lightning.fabric.strategies.registry import _StrategyRegistry +from lightning.pytorch.strategies.bagua import BaguaStrategy # noqa: F401 +from lightning.pytorch.strategies.colossalai import ColossalAIStrategy # noqa: F401 +from lightning.pytorch.strategies.ddp import DDPStrategy # noqa: F401 +from lightning.pytorch.strategies.ddp_spawn import DDPSpawnStrategy # noqa: F401 +from lightning.pytorch.strategies.deepspeed import DeepSpeedStrategy # noqa: F401 +from lightning.pytorch.strategies.dp import DataParallelStrategy # noqa: F401 +from lightning.pytorch.strategies.fsdp import FSDPStrategy # noqa: F401 +from lightning.pytorch.strategies.hpu_parallel import HPUParallelStrategy # noqa: F401 +from lightning.pytorch.strategies.ipu import IPUStrategy # noqa: F401 +from lightning.pytorch.strategies.parallel import ParallelStrategy # noqa: F401 +from lightning.pytorch.strategies.single_device import SingleDeviceStrategy # noqa: F401 +from lightning.pytorch.strategies.single_hpu import SingleHPUStrategy # noqa: F401 +from lightning.pytorch.strategies.single_tpu import SingleTPUStrategy # noqa: F401 +from lightning.pytorch.strategies.strategy import Strategy # noqa: F401 +from lightning.pytorch.strategies.tpu_spawn import TPUSpawnStrategy # noqa: F401 +from lightning.pytorch.strategies.utils import _call_register_strategies + +_STRATEGIES_BASE_MODULE = "lightning.pytorch.strategies" +StrategyRegistry = _StrategyRegistry() +_call_register_strategies(StrategyRegistry, _STRATEGIES_BASE_MODULE) diff --git a/src/pytorch_lightning/strategies/bagua.py b/src/lightning/pytorch/strategies/bagua.py similarity index 95% rename from src/pytorch_lightning/strategies/bagua.py rename to src/lightning/pytorch/strategies/bagua.py index 252ad4db00..7659ba21ed 100644 --- a/src/pytorch_lightning/strategies/bagua.py +++ b/src/lightning/pytorch/strategies/bagua.py @@ -20,17 +20,17 @@ from lightning_utilities.core.imports import module_available from torch import Tensor from torch.nn import Module -import pytorch_lightning as pl -from lightning_fabric.plugins import CheckpointIO, ClusterEnvironment -from lightning_fabric.utilities.optimizer import _optimizers_to_device -from lightning_fabric.utilities.seed import reset_seed -from lightning_fabric.utilities.types import ReduceOp -from pytorch_lightning.overrides.base import _LightningModuleWrapperBase, _LightningPrecisionModuleWrapperBase -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.ddp import DDPStrategy -from pytorch_lightning.strategies.strategy import TBroadcast -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities.exceptions import MisconfigurationException +import lightning.pytorch as pl +from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment +from lightning.fabric.utilities.optimizer import _optimizers_to_device +from lightning.fabric.utilities.seed import reset_seed +from lightning.fabric.utilities.types import ReduceOp +from lightning.pytorch.overrides.base import _LightningModuleWrapperBase, _LightningPrecisionModuleWrapperBase +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.ddp import DDPStrategy +from lightning.pytorch.strategies.strategy import TBroadcast +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities.exceptions import MisconfigurationException _BAGUA_AVAILABLE = module_available("bagua.torch_api") diff --git a/src/pytorch_lightning/strategies/colossalai.py 
b/src/lightning/pytorch/strategies/colossalai.py similarity index 96% rename from src/pytorch_lightning/strategies/colossalai.py rename to src/lightning/pytorch/strategies/colossalai.py index e7a9877859..69964bb61f 100644 --- a/src/pytorch_lightning/strategies/colossalai.py +++ b/src/lightning/pytorch/strategies/colossalai.py @@ -20,20 +20,20 @@ from torch import Tensor from torch.nn import Module from torch.optim.optimizer import Optimizer -import pytorch_lightning as pl -from lightning_fabric.accelerators.cuda import _patch_cuda_is_available -from lightning_fabric.plugins.environments.cluster_environment import ClusterEnvironment -from lightning_fabric.utilities.distributed import ReduceOp -from pytorch_lightning.accelerators.cuda import CUDAAccelerator -from pytorch_lightning.overrides.base import _LightningModuleWrapperBase, _LightningPrecisionModuleWrapperBase -from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO -from pytorch_lightning.plugins.precision import ColossalAIPrecisionPlugin -from pytorch_lightning.strategies.ddp import DDPStrategy -from pytorch_lightning.strategies.strategy import TBroadcast -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_warn -from pytorch_lightning.utilities.types import STEP_OUTPUT +import lightning.pytorch as pl +from lightning.fabric.accelerators.cuda import _patch_cuda_is_available +from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment +from lightning.fabric.utilities.distributed import ReduceOp +from lightning.pytorch.accelerators.cuda import CUDAAccelerator +from lightning.pytorch.overrides.base import _LightningModuleWrapperBase, _LightningPrecisionModuleWrapperBase +from lightning.pytorch.plugins.io.checkpoint_plugin import CheckpointIO +from lightning.pytorch.plugins.precision import ColossalAIPrecisionPlugin +from lightning.pytorch.strategies.ddp import DDPStrategy +from lightning.pytorch.strategies.strategy import TBroadcast +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import rank_zero_warn +from lightning.pytorch.utilities.types import STEP_OUTPUT _COLOSSALAI_AVAILABLE = RequirementCache("colossalai") _COLOSSALAI_GREATER_0_1_10 = RequirementCache("colossalai>0.1.10") diff --git a/src/pytorch_lightning/strategies/ddp.py b/src/lightning/pytorch/strategies/ddp.py similarity index 93% rename from src/pytorch_lightning/strategies/ddp.py rename to src/lightning/pytorch/strategies/ddp.py index 70caa8c9f5..2636678477 100644 --- a/src/pytorch_lightning/strategies/ddp.py +++ b/src/lightning/pytorch/strategies/ddp.py @@ -22,31 +22,31 @@ from torch.nn import Module from torch.nn.parallel.distributed import DistributedDataParallel from torch.optim.optimizer import Optimizer -import pytorch_lightning as pl -from lightning_fabric.plugins import CheckpointIO, ClusterEnvironment -from lightning_fabric.plugins.collectives.torch_collective import default_pg_timeout -from lightning_fabric.utilities.distributed import ( +import lightning.pytorch as pl +from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment +from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout +from lightning.fabric.utilities.distributed import ( _distributed_available, _get_default_process_group_backend_for_device, _init_dist_connection, 
_sync_ddp_if_available, ) -from lightning_fabric.utilities.distributed import group as _group -from lightning_fabric.utilities.imports import _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_11 -from lightning_fabric.utilities.optimizer import _optimizers_to_device -from lightning_fabric.utilities.seed import reset_seed -from lightning_fabric.utilities.types import ReduceOp -from pytorch_lightning.core.optimizer import LightningOptimizer -from pytorch_lightning.overrides.base import _LightningModuleWrapperBase, _LightningPrecisionModuleWrapperBase -from pytorch_lightning.overrides.distributed import prepare_for_backward -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.launchers.subprocess_script import _SubprocessScriptLauncher -from pytorch_lightning.strategies.parallel import ParallelStrategy -from pytorch_lightning.strategies.strategy import TBroadcast -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities.distributed import register_ddp_comm_hook -from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_only -from pytorch_lightning.utilities.types import PredictStep, STEP_OUTPUT, TestStep, ValidationStep +from lightning.fabric.utilities.distributed import group as _group +from lightning.fabric.utilities.imports import _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_11 +from lightning.fabric.utilities.optimizer import _optimizers_to_device +from lightning.fabric.utilities.seed import reset_seed +from lightning.fabric.utilities.types import ReduceOp +from lightning.pytorch.core.optimizer import LightningOptimizer +from lightning.pytorch.overrides.base import _LightningModuleWrapperBase, _LightningPrecisionModuleWrapperBase +from lightning.pytorch.overrides.distributed import prepare_for_backward +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.launchers.subprocess_script import _SubprocessScriptLauncher +from lightning.pytorch.strategies.parallel import ParallelStrategy +from lightning.pytorch.strategies.strategy import TBroadcast +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities.distributed import register_ddp_comm_hook +from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_only +from lightning.pytorch.utilities.types import PredictStep, STEP_OUTPUT, TestStep, ValidationStep if torch.distributed.is_available(): from torch.distributed.algorithms.model_averaging.averagers import ModelAverager diff --git a/src/pytorch_lightning/strategies/ddp_spawn.py b/src/lightning/pytorch/strategies/ddp_spawn.py similarity index 92% rename from src/pytorch_lightning/strategies/ddp_spawn.py rename to src/lightning/pytorch/strategies/ddp_spawn.py index 9a9ca12d1f..c5336b73d4 100644 --- a/src/pytorch_lightning/strategies/ddp_spawn.py +++ b/src/lightning/pytorch/strategies/ddp_spawn.py @@ -22,29 +22,29 @@ from torch import Tensor from torch.nn import Module from torch.nn.parallel.distributed import DistributedDataParallel -import pytorch_lightning as pl -from lightning_fabric.plugins import CheckpointIO, ClusterEnvironment -from lightning_fabric.plugins.collectives.torch_collective import default_pg_timeout -from lightning_fabric.utilities.distributed import ( +import lightning.pytorch as pl +from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment +from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout +from lightning.fabric.utilities.distributed import ( 
_distributed_available, _get_default_process_group_backend_for_device, _init_dist_connection, _sync_ddp_if_available, ) -from lightning_fabric.utilities.distributed import group as _group -from lightning_fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_11 -from lightning_fabric.utilities.optimizer import _optimizers_to_device -from lightning_fabric.utilities.types import ReduceOp -from pytorch_lightning.overrides.base import _LightningModuleWrapperBase, _LightningPrecisionModuleWrapperBase -from pytorch_lightning.overrides.distributed import prepare_for_backward -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.launchers.multiprocessing import _MultiProcessingLauncher -from pytorch_lightning.strategies.parallel import ParallelStrategy -from pytorch_lightning.strategies.strategy import TBroadcast -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities.distributed import register_ddp_comm_hook -from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_only -from pytorch_lightning.utilities.types import PredictStep, STEP_OUTPUT, TestStep, ValidationStep +from lightning.fabric.utilities.distributed import group as _group +from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_11 +from lightning.fabric.utilities.optimizer import _optimizers_to_device +from lightning.fabric.utilities.types import ReduceOp +from lightning.pytorch.overrides.base import _LightningModuleWrapperBase, _LightningPrecisionModuleWrapperBase +from lightning.pytorch.overrides.distributed import prepare_for_backward +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.launchers.multiprocessing import _MultiProcessingLauncher +from lightning.pytorch.strategies.parallel import ParallelStrategy +from lightning.pytorch.strategies.strategy import TBroadcast +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities.distributed import register_ddp_comm_hook +from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_only +from lightning.pytorch.utilities.types import PredictStep, STEP_OUTPUT, TestStep, ValidationStep log = logging.getLogger(__name__) diff --git a/src/pytorch_lightning/strategies/deepspeed.py b/src/lightning/pytorch/strategies/deepspeed.py similarity index 97% rename from src/pytorch_lightning/strategies/deepspeed.py rename to src/lightning/pytorch/strategies/deepspeed.py index 830d052271..2e5fd18f3c 100644 --- a/src/pytorch_lightning/strategies/deepspeed.py +++ b/src/lightning/pytorch/strategies/deepspeed.py @@ -27,24 +27,24 @@ from torch import Tensor from torch.nn import Module from torch.optim import Optimizer -import pytorch_lightning as pl -from lightning_fabric.plugins import ClusterEnvironment -from lightning_fabric.strategies.deepspeed import _DEEPSPEED_AVAILABLE -from lightning_fabric.utilities.optimizer import _optimizers_to_device -from lightning_fabric.utilities.seed import reset_seed -from lightning_fabric.utilities.types import _PATH, LRScheduler, ReduceLROnPlateau -from pytorch_lightning.accelerators.cuda import CUDAAccelerator -from pytorch_lightning.core.optimizer import _init_optimizers_and_lr_schedulers -from pytorch_lightning.overrides.base import _LightningModuleWrapperBase, _LightningPrecisionModuleWrapperBase -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.ddp import DDPStrategy -from pytorch_lightning.strategies.utils import 
_fp_to_half -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities import GradClipAlgorithmType -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_only, rank_zero_warn, WarningCache -from pytorch_lightning.utilities.types import LRSchedulerConfig, STEP_OUTPUT +import lightning.pytorch as pl +from lightning.fabric.plugins import ClusterEnvironment +from lightning.fabric.strategies.deepspeed import _DEEPSPEED_AVAILABLE +from lightning.fabric.utilities.optimizer import _optimizers_to_device +from lightning.fabric.utilities.seed import reset_seed +from lightning.fabric.utilities.types import _PATH, LRScheduler, ReduceLROnPlateau +from lightning.pytorch.accelerators.cuda import CUDAAccelerator +from lightning.pytorch.core.optimizer import _init_optimizers_and_lr_schedulers +from lightning.pytorch.overrides.base import _LightningModuleWrapperBase, _LightningPrecisionModuleWrapperBase +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.ddp import DDPStrategy +from lightning.pytorch.strategies.utils import _fp_to_half +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities import GradClipAlgorithmType +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_only, rank_zero_warn, WarningCache +from lightning.pytorch.utilities.types import LRSchedulerConfig, STEP_OUTPUT log = logging.getLogger(__name__) warning_cache = WarningCache() @@ -786,7 +786,7 @@ class DeepSpeedStrategy(DDPStrategy): # Rely on deepspeed to load the checkpoint and necessary information assert self.lightning_module is not None - from pytorch_lightning.trainer.states import TrainerFn + from lightning.pytorch.trainer.states import TrainerFn is_fitting = self.lightning_module.trainer.state.fn == TrainerFn.FITTING _, client_state = self.deepspeed_engine.load_checkpoint( diff --git a/src/pytorch_lightning/strategies/dp.py b/src/lightning/pytorch/strategies/dp.py similarity index 90% rename from src/pytorch_lightning/strategies/dp.py rename to src/lightning/pytorch/strategies/dp.py index 838e8b15cd..1631bf12bf 100644 --- a/src/pytorch_lightning/strategies/dp.py +++ b/src/lightning/pytorch/strategies/dp.py @@ -18,16 +18,16 @@ from lightning_utilities.core.apply_func import apply_to_collection from torch import Tensor from torch.nn import DataParallel, Module -import pytorch_lightning as pl -from lightning_fabric.plugins import CheckpointIO -from lightning_fabric.utilities.distributed import ReduceOp -from pytorch_lightning.overrides.base import _LightningPrecisionModuleWrapperBase -from pytorch_lightning.overrides.data_parallel import LightningParallelModule -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.parallel import ParallelStrategy -from pytorch_lightning.strategies.strategy import TBroadcast, TReduce -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.types import STEP_OUTPUT +import lightning.pytorch as pl +from lightning.fabric.plugins import CheckpointIO +from lightning.fabric.utilities.distributed import ReduceOp +from lightning.pytorch.overrides.base import 
_LightningPrecisionModuleWrapperBase +from lightning.pytorch.overrides.data_parallel import LightningParallelModule +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.parallel import ParallelStrategy +from lightning.pytorch.strategies.strategy import TBroadcast, TReduce +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.types import STEP_OUTPUT class DataParallelStrategy(ParallelStrategy): diff --git a/src/pytorch_lightning/strategies/fsdp.py b/src/lightning/pytorch/strategies/fsdp.py similarity index 93% rename from src/pytorch_lightning/strategies/fsdp.py rename to src/lightning/pytorch/strategies/fsdp.py index 58af256835..e5d27452ea 100644 --- a/src/pytorch_lightning/strategies/fsdp.py +++ b/src/lightning/pytorch/strategies/fsdp.py @@ -19,35 +19,35 @@ import torch from torch import Tensor from torch.nn import Module -import pytorch_lightning as pl -from lightning_fabric.plugins import CheckpointIO, ClusterEnvironment -from lightning_fabric.strategies.fsdp import ( +import lightning.pytorch as pl +from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment +from lightning.fabric.strategies.fsdp import ( _init_cpu_offload, _optimizer_has_flat_params, _setup_activation_checkpointing, ) -from lightning_fabric.utilities.distributed import ( +from lightning.fabric.utilities.distributed import ( _get_default_process_group_backend_for_device, _init_dist_connection, _sync_ddp_if_available, ) -from lightning_fabric.utilities.distributed import group as _group -from lightning_fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_12 -from lightning_fabric.utilities.optimizer import _optimizers_to_device -from lightning_fabric.utilities.seed import reset_seed -from lightning_fabric.utilities.types import ProcessGroup, ReduceOp -from pytorch_lightning.overrides.base import _LightningModuleWrapperBase -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.plugins.precision.fsdp import FSDPMixedPrecisionPlugin -from pytorch_lightning.strategies.launchers.subprocess_script import _SubprocessScriptLauncher -from pytorch_lightning.strategies.parallel import ParallelStrategy -from pytorch_lightning.strategies.strategy import TBroadcast -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_13 -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_only -from pytorch_lightning.utilities.types import STEP_OUTPUT +from lightning.fabric.utilities.distributed import group as _group +from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_12 +from lightning.fabric.utilities.optimizer import _optimizers_to_device +from lightning.fabric.utilities.seed import reset_seed +from lightning.fabric.utilities.types import ProcessGroup, ReduceOp +from lightning.pytorch.overrides.base import _LightningModuleWrapperBase +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.plugins.precision.fsdp import FSDPMixedPrecisionPlugin +from lightning.pytorch.strategies.launchers.subprocess_script import _SubprocessScriptLauncher +from lightning.pytorch.strategies.parallel import ParallelStrategy +from lightning.pytorch.strategies.strategy import TBroadcast +from lightning.pytorch.trainer.states import 
TrainerFn +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.imports import _TORCH_GREATER_EQUAL_1_13 +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_only +from lightning.pytorch.utilities.types import STEP_OUTPUT _distributed_available = torch.distributed.is_available() _fsdp_available = _TORCH_GREATER_EQUAL_1_12 and _distributed_available diff --git a/src/pytorch_lightning/strategies/hpu_parallel.py b/src/lightning/pytorch/strategies/hpu_parallel.py similarity index 90% rename from src/pytorch_lightning/strategies/hpu_parallel.py rename to src/lightning/pytorch/strategies/hpu_parallel.py index 3fb50d6581..c22e9b5863 100644 --- a/src/pytorch_lightning/strategies/hpu_parallel.py +++ b/src/lightning/pytorch/strategies/hpu_parallel.py @@ -19,18 +19,18 @@ import torch.distributed from torch.nn import Module from torch.optim.optimizer import Optimizer -import pytorch_lightning as pl -from lightning_fabric.plugins import CheckpointIO, ClusterEnvironment -from lightning_fabric.utilities.distributed import group as _group -from pytorch_lightning.overrides.base import _LightningModuleWrapperBase -from pytorch_lightning.overrides.torch_distributed import broadcast_object_list -from pytorch_lightning.plugins.io.hpu_plugin import HPUCheckpointIO -from pytorch_lightning.plugins.io.wrapper import _WrappingCheckpointIO -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.ddp import DDPStrategy -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.imports import _HPU_AVAILABLE, _TORCH_LESSER_EQUAL_1_10_2 -from pytorch_lightning.utilities.types import STEP_OUTPUT +import lightning.pytorch as pl +from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment +from lightning.fabric.utilities.distributed import group as _group +from lightning.pytorch.overrides.base import _LightningModuleWrapperBase +from lightning.pytorch.overrides.torch_distributed import broadcast_object_list +from lightning.pytorch.plugins.io.hpu_plugin import HPUCheckpointIO +from lightning.pytorch.plugins.io.wrapper import _WrappingCheckpointIO +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.ddp import DDPStrategy +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.imports import _HPU_AVAILABLE, _TORCH_LESSER_EQUAL_1_10_2 +from lightning.pytorch.utilities.types import STEP_OUTPUT if _HPU_AVAILABLE: import habana_frameworks.torch.core as htcore diff --git a/src/pytorch_lightning/strategies/ipu.py b/src/lightning/pytorch/strategies/ipu.py similarity index 95% rename from src/pytorch_lightning/strategies/ipu.py rename to src/lightning/pytorch/strategies/ipu.py index 044b50ecd2..6a513898c8 100644 --- a/src/pytorch_lightning/strategies/ipu.py +++ b/src/lightning/pytorch/strategies/ipu.py @@ -20,20 +20,20 @@ from lightning_utilities.core.apply_func import apply_to_collection from torch import Tensor from torch.utils.data import DataLoader, Sampler -import pytorch_lightning as pl -from lightning_fabric.plugins import CheckpointIO, ClusterEnvironment -from lightning_fabric.utilities.cloud_io import get_filesystem -from pytorch_lightning.overrides.base import _LightningModuleWrapperBase -from pytorch_lightning.plugins.precision import PrecisionPlugin -from 
pytorch_lightning.strategies.parallel import ParallelStrategy -from pytorch_lightning.strategies.strategy import TBroadcast -from pytorch_lightning.strategies.utils import _fp_to_half -from pytorch_lightning.trainer.states import RunningStage, TrainerFn -from pytorch_lightning.utilities import _IPU_AVAILABLE, _POPTORCH_AVAILABLE, rank_zero_warn -from pytorch_lightning.utilities.data import _get_dataloader_init_args_and_kwargs, _reinstantiate_wrapped_cls -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.types import STEP_OUTPUT +import lightning.pytorch as pl +from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment +from lightning.fabric.utilities.cloud_io import get_filesystem +from lightning.pytorch.overrides.base import _LightningModuleWrapperBase +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.parallel import ParallelStrategy +from lightning.pytorch.strategies.strategy import TBroadcast +from lightning.pytorch.strategies.utils import _fp_to_half +from lightning.pytorch.trainer.states import RunningStage, TrainerFn +from lightning.pytorch.utilities import _IPU_AVAILABLE, _POPTORCH_AVAILABLE, rank_zero_warn +from lightning.pytorch.utilities.data import _get_dataloader_init_args_and_kwargs, _reinstantiate_wrapped_cls +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.types import STEP_OUTPUT if _POPTORCH_AVAILABLE: import poptorch diff --git a/src/pytorch_lightning/strategies/launchers/__init__.py b/src/lightning/pytorch/strategies/launchers/__init__.py similarity index 79% rename from src/pytorch_lightning/strategies/launchers/__init__.py rename to src/lightning/pytorch/strategies/launchers/__init__.py index 16e5cfc001..0180d1ffb8 100644 --- a/src/pytorch_lightning/strategies/launchers/__init__.py +++ b/src/lightning/pytorch/strategies/launchers/__init__.py @@ -11,9 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from pytorch_lightning.strategies.launchers.multiprocessing import _MultiProcessingLauncher -from pytorch_lightning.strategies.launchers.subprocess_script import _SubprocessScriptLauncher -from pytorch_lightning.strategies.launchers.xla import _XLALauncher +from lightning.pytorch.strategies.launchers.multiprocessing import _MultiProcessingLauncher +from lightning.pytorch.strategies.launchers.subprocess_script import _SubprocessScriptLauncher +from lightning.pytorch.strategies.launchers.xla import _XLALauncher __all__ = [ "_MultiProcessingLauncher", diff --git a/src/pytorch_lightning/strategies/launchers/launcher.py b/src/lightning/pytorch/strategies/launchers/launcher.py similarity index 86% rename from src/pytorch_lightning/strategies/launchers/launcher.py rename to src/lightning/pytorch/strategies/launchers/launcher.py index 352949552e..b84bb0e436 100644 --- a/src/pytorch_lightning/strategies/launchers/launcher.py +++ b/src/lightning/pytorch/strategies/launchers/launcher.py @@ -13,8 +13,8 @@ # limitations under the License. 
from abc import ABC, abstractmethod -from lightning_fabric.strategies.launchers.launcher import _Launcher as _FabricLauncher -from pytorch_lightning.trainer.connectors.signal_connector import _SIGNUM +from lightning.fabric.strategies.launchers.launcher import _Launcher as _FabricLauncher +from lightning.pytorch.trainer.connectors.signal_connector import _SIGNUM class _Launcher(_FabricLauncher, ABC): diff --git a/src/pytorch_lightning/strategies/launchers/multiprocessing.py b/src/lightning/pytorch/strategies/launchers/multiprocessing.py similarity index 94% rename from src/pytorch_lightning/strategies/launchers/multiprocessing.py rename to src/lightning/pytorch/strategies/launchers/multiprocessing.py index 84f826ead4..2def93b772 100644 --- a/src/pytorch_lightning/strategies/launchers/multiprocessing.py +++ b/src/lightning/pytorch/strategies/launchers/multiprocessing.py @@ -27,16 +27,16 @@ import torch.multiprocessing as mp from lightning_utilities.core.apply_func import apply_to_collection from torch import Tensor -import pytorch_lightning as pl -from lightning_fabric.strategies.launchers.multiprocessing import _check_bad_cuda_fork -from lightning_fabric.utilities import move_data_to_device -from lightning_fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_11 -from lightning_fabric.utilities.seed import _collect_rng_states, _set_rng_states -from lightning_fabric.utilities.types import _PATH -from pytorch_lightning.strategies.launchers.launcher import _Launcher -from pytorch_lightning.trainer.connectors.signal_connector import _SIGNUM -from pytorch_lightning.trainer.states import TrainerFn, TrainerState -from pytorch_lightning.utilities.rank_zero import rank_zero_debug +import lightning.pytorch as pl +from lightning.fabric.strategies.launchers.multiprocessing import _check_bad_cuda_fork +from lightning.fabric.utilities import move_data_to_device +from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_11 +from lightning.fabric.utilities.seed import _collect_rng_states, _set_rng_states +from lightning.fabric.utilities.types import _PATH +from lightning.pytorch.strategies.launchers.launcher import _Launcher +from lightning.pytorch.trainer.connectors.signal_connector import _SIGNUM +from lightning.pytorch.trainer.states import TrainerFn, TrainerState +from lightning.pytorch.utilities.rank_zero import rank_zero_debug log = logging.getLogger(__name__) @@ -92,7 +92,7 @@ class _MultiProcessingLauncher(_Launcher): Arguments: function: The entry point for all launched processes. *args: Optional positional arguments to be passed to the given function. - trainer: Optional reference to the :class:`~pytorch_lightning.trainer.trainer.Trainer` for which + trainer: Optional reference to the :class:`~lightning.pytorch.trainer.trainer.Trainer` for which a selected set of attributes get restored in the main process after processes join. **kwargs: Optional keyword arguments to be passed to the given function. 
""" @@ -203,7 +203,7 @@ class _MultiProcessingLauncher(_Launcher): def _check_torchdistx_support(self) -> None: if self._start_method == "spawn": - from pytorch_lightning.utilities.meta import _is_deferred + from lightning.pytorch.utilities.meta import _is_deferred if _is_deferred(self._strategy.lightning_module): raise NotImplementedError( diff --git a/src/pytorch_lightning/strategies/launchers/subprocess_script.py b/src/lightning/pytorch/strategies/launchers/subprocess_script.py similarity index 94% rename from src/pytorch_lightning/strategies/launchers/subprocess_script.py rename to src/lightning/pytorch/strategies/launchers/subprocess_script.py index 1ce4f95a11..66739ccfba 100644 --- a/src/pytorch_lightning/strategies/launchers/subprocess_script.py +++ b/src/lightning/pytorch/strategies/launchers/subprocess_script.py @@ -18,11 +18,11 @@ from typing import Any, Callable, List, Optional from lightning_utilities.core.imports import RequirementCache -import pytorch_lightning as pl -from lightning_fabric.plugins import ClusterEnvironment -from lightning_fabric.strategies.launchers.subprocess_script import _basic_subprocess_cmd, _hydra_subprocess_cmd -from pytorch_lightning.strategies.launchers.launcher import _Launcher -from pytorch_lightning.trainer.connectors.signal_connector import _SIGNUM +import lightning.pytorch as pl +from lightning.fabric.plugins import ClusterEnvironment +from lightning.fabric.strategies.launchers.subprocess_script import _basic_subprocess_cmd, _hydra_subprocess_cmd +from lightning.pytorch.strategies.launchers.launcher import _Launcher +from lightning.pytorch.trainer.connectors.signal_connector import _SIGNUM log = logging.getLogger(__name__) _HYDRA_AVAILABLE = RequirementCache("hydra-core") @@ -84,7 +84,7 @@ class _SubprocessScriptLauncher(_Launcher): function: A callback function to execute after all processes have been created. It is up to the implementation of this function to synchronize the processes, e.g., with barriers. *args: Optional positional arguments to be passed to the given function. - trainer: Optional reference to the :class:`~pytorch_lightning.trainer.trainer.Trainer`. + trainer: Optional reference to the :class:`~lightning.pytorch.trainer.trainer.Trainer`. **kwargs: Optional keyword arguments to be passed to the given function. 
""" if not self.cluster_environment.creates_processes_externally: diff --git a/src/pytorch_lightning/strategies/launchers/xla.py b/src/lightning/pytorch/strategies/launchers/xla.py similarity index 91% rename from src/pytorch_lightning/strategies/launchers/xla.py rename to src/lightning/pytorch/strategies/launchers/xla.py index 47a77eab88..8c9c03678f 100644 --- a/src/pytorch_lightning/strategies/launchers/xla.py +++ b/src/lightning/pytorch/strategies/launchers/xla.py @@ -17,18 +17,18 @@ from typing import Any, Callable, Optional import torch.multiprocessing as mp -import pytorch_lightning as pl -from lightning_fabric.accelerators.tpu import _XLA_AVAILABLE -from lightning_fabric.strategies.launchers.xla import _rank_teardown -from lightning_fabric.utilities import move_data_to_device -from pytorch_lightning.strategies.launchers.multiprocessing import ( +import lightning.pytorch as pl +from lightning.fabric.accelerators.tpu import _XLA_AVAILABLE +from lightning.fabric.strategies.launchers.xla import _rank_teardown +from lightning.fabric.utilities import move_data_to_device +from lightning.pytorch.strategies.launchers.multiprocessing import ( _FakeQueue, _GlobalStateSnapshot, _MultiProcessingLauncher, _WorkerOutput, ) -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities.rank_zero import rank_zero_debug +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities.rank_zero import rank_zero_debug class _XLALauncher(_MultiProcessingLauncher): @@ -65,7 +65,7 @@ class _XLALauncher(_MultiProcessingLauncher): Arguments: function: The entry point for all launched processes. *args: Optional positional arguments to be passed to the given function. - trainer: Optional reference to the :class:`~pytorch_lightning.trainer.trainer.Trainer` for which + trainer: Optional reference to the :class:`~lightning.pytorch.trainer.trainer.Trainer` for which a selected set of attributes get restored in the main process after processes join. **kwargs: Optional keyword arguments to be passed to the given function. 
""" diff --git a/src/pytorch_lightning/strategies/parallel.py b/src/lightning/pytorch/strategies/parallel.py similarity index 93% rename from src/pytorch_lightning/strategies/parallel.py rename to src/lightning/pytorch/strategies/parallel.py index e729e815ac..d67f356e2c 100644 --- a/src/pytorch_lightning/strategies/parallel.py +++ b/src/lightning/pytorch/strategies/parallel.py @@ -18,12 +18,12 @@ from typing import Any, Dict, Generator, List, Optional import torch from torch import Tensor -import pytorch_lightning as pl -from lightning_fabric.plugins import CheckpointIO, ClusterEnvironment -from lightning_fabric.utilities.distributed import _all_gather_ddp_if_available, ReduceOp -from pytorch_lightning.plugins import LayerSync -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.strategy import Strategy +import lightning.pytorch as pl +from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment +from lightning.fabric.utilities.distributed import _all_gather_ddp_if_available, ReduceOp +from lightning.pytorch.plugins import LayerSync +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.strategy import Strategy class ParallelStrategy(Strategy, ABC): diff --git a/src/pytorch_lightning/strategies/single_device.py b/src/lightning/pytorch/strategies/single_device.py similarity index 91% rename from src/pytorch_lightning/strategies/single_device.py rename to src/lightning/pytorch/strategies/single_device.py index 1d8862d644..380593e0c1 100644 --- a/src/pytorch_lightning/strategies/single_device.py +++ b/src/lightning/pytorch/strategies/single_device.py @@ -18,11 +18,11 @@ from typing import Any import torch from torch import Tensor -import pytorch_lightning as pl -from lightning_fabric.plugins import CheckpointIO -from lightning_fabric.utilities.types import _DEVICE -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.strategy import Strategy, TBroadcast +import lightning.pytorch as pl +from lightning.fabric.plugins import CheckpointIO +from lightning.fabric.utilities.types import _DEVICE +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.strategy import Strategy, TBroadcast class SingleDeviceStrategy(Strategy): diff --git a/src/pytorch_lightning/strategies/single_hpu.py b/src/lightning/pytorch/strategies/single_hpu.py similarity index 86% rename from src/pytorch_lightning/strategies/single_hpu.py rename to src/lightning/pytorch/strategies/single_hpu.py index a52b303b08..a4f873e720 100644 --- a/src/pytorch_lightning/strategies/single_hpu.py +++ b/src/lightning/pytorch/strategies/single_hpu.py @@ -17,16 +17,16 @@ from typing import Any, Callable, Dict, Optional, Union from torch.nn import Module from torch.optim.optimizer import Optimizer -import pytorch_lightning as pl -from lightning_fabric.plugins import CheckpointIO -from lightning_fabric.utilities.types import _DEVICE -from pytorch_lightning.plugins.io.hpu_plugin import HPUCheckpointIO -from pytorch_lightning.plugins.io.wrapper import _WrappingCheckpointIO -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.single_device import SingleDeviceStrategy -from pytorch_lightning.utilities import _HPU_AVAILABLE -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.types import STEP_OUTPUT +import lightning.pytorch as pl +from lightning.fabric.plugins 
import CheckpointIO +from lightning.fabric.utilities.types import _DEVICE +from lightning.pytorch.plugins.io.hpu_plugin import HPUCheckpointIO +from lightning.pytorch.plugins.io.wrapper import _WrappingCheckpointIO +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.single_device import SingleDeviceStrategy +from lightning.pytorch.utilities import _HPU_AVAILABLE +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.types import STEP_OUTPUT if _HPU_AVAILABLE: import habana_frameworks.torch.core as htcore diff --git a/src/pytorch_lightning/strategies/single_tpu.py b/src/lightning/pytorch/strategies/single_tpu.py similarity index 86% rename from src/pytorch_lightning/strategies/single_tpu.py rename to src/lightning/pytorch/strategies/single_tpu.py index a00ef1bf21..e5c2c9f46c 100644 --- a/src/pytorch_lightning/strategies/single_tpu.py +++ b/src/lightning/pytorch/strategies/single_tpu.py @@ -14,13 +14,13 @@ import os from typing import Dict, Optional -import pytorch_lightning as pl -from lightning_fabric.accelerators.tpu import _XLA_AVAILABLE -from lightning_fabric.plugins import CheckpointIO, XLACheckpointIO -from pytorch_lightning.plugins.io.wrapper import _WrappingCheckpointIO -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.single_device import SingleDeviceStrategy -from pytorch_lightning.utilities import find_shared_parameters, set_shared_parameters +import lightning.pytorch as pl +from lightning.fabric.accelerators.tpu import _XLA_AVAILABLE +from lightning.fabric.plugins import CheckpointIO, XLACheckpointIO +from lightning.pytorch.plugins.io.wrapper import _WrappingCheckpointIO +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.single_device import SingleDeviceStrategy +from lightning.pytorch.utilities import find_shared_parameters, set_shared_parameters class SingleTPUStrategy(SingleDeviceStrategy): diff --git a/src/pytorch_lightning/strategies/strategy.py b/src/lightning/pytorch/strategies/strategy.py similarity index 95% rename from src/pytorch_lightning/strategies/strategy.py rename to src/lightning/pytorch/strategies/strategy.py index 7d0a39e03c..57611b911b 100644 --- a/src/pytorch_lightning/strategies/strategy.py +++ b/src/lightning/pytorch/strategies/strategy.py @@ -22,19 +22,19 @@ from torch.nn import Module from torch.optim import Optimizer from torch.utils.data import DataLoader -import pytorch_lightning as pl -from lightning_fabric.plugins import CheckpointIO -from lightning_fabric.utilities import move_data_to_device -from lightning_fabric.utilities.distributed import ReduceOp -from lightning_fabric.utilities.optimizer import _optimizer_to_device, _optimizers_to_device -from lightning_fabric.utilities.types import _PATH -from pytorch_lightning.core.optimizer import _init_optimizers_and_lr_schedulers, LightningOptimizer -from pytorch_lightning.plugins import TorchCheckpointIO -from pytorch_lightning.plugins.io.wrapper import _WrappingCheckpointIO -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.launchers.launcher import _Launcher -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities.types import ( +import lightning.pytorch as pl +from lightning.fabric.plugins import CheckpointIO +from lightning.fabric.utilities import move_data_to_device +from lightning.fabric.utilities.distributed import 
ReduceOp +from lightning.fabric.utilities.optimizer import _optimizer_to_device, _optimizers_to_device +from lightning.fabric.utilities.types import _PATH +from lightning.pytorch.core.optimizer import _init_optimizers_and_lr_schedulers, LightningOptimizer +from lightning.pytorch.plugins import TorchCheckpointIO +from lightning.pytorch.plugins.io.wrapper import _WrappingCheckpointIO +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.launchers.launcher import _Launcher +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities.types import ( LRSchedulerConfig, PredictStep, STEP_OUTPUT, @@ -362,7 +362,7 @@ class Strategy(ABC): def training_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT: """The actual training step. - See :meth:`~pytorch_lightning.core.module.LightningModule.training_step` for more details + See :meth:`~lightning.pytorch.core.module.LightningModule.training_step` for more details """ with self.precision_plugin.train_step_context(): assert isinstance(self.model, TrainingStep) @@ -374,7 +374,7 @@ class Strategy(ABC): def validation_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]: """The actual validation step. - See :meth:`~pytorch_lightning.core.module.LightningModule.validation_step` for more details + See :meth:`~lightning.pytorch.core.module.LightningModule.validation_step` for more details """ with self.precision_plugin.val_step_context(): assert isinstance(self.model, ValidationStep) @@ -383,7 +383,7 @@ class Strategy(ABC): def test_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]: """The actual test step. - See :meth:`~pytorch_lightning.core.module.LightningModule.test_step` for more details + See :meth:`~lightning.pytorch.core.module.LightningModule.test_step` for more details """ with self.precision_plugin.test_step_context(): assert isinstance(self.model, TestStep) @@ -392,7 +392,7 @@ class Strategy(ABC): def predict_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT: """The actual predict step. 
- See :meth:`~pytorch_lightning.core.module.LightningModule.predict_step` for more details + See :meth:`~lightning.pytorch.core.module.LightningModule.predict_step` for more details """ with self.precision_plugin.predict_step_context(): assert isinstance(self.model, PredictStep) diff --git a/src/pytorch_lightning/strategies/tpu_spawn.py b/src/lightning/pytorch/strategies/tpu_spawn.py similarity index 91% rename from src/pytorch_lightning/strategies/tpu_spawn.py rename to src/lightning/pytorch/strategies/tpu_spawn.py index 7d065088d6..c747673cf5 100644 --- a/src/pytorch_lightning/strategies/tpu_spawn.py +++ b/src/lightning/pytorch/strategies/tpu_spawn.py @@ -21,25 +21,25 @@ from torch import Tensor from torch.nn import Module from torch.utils.data import DataLoader -import pytorch_lightning as pl -from lightning_fabric.accelerators.tpu import _XLA_AVAILABLE -from lightning_fabric.plugins import CheckpointIO, XLACheckpointIO -from lightning_fabric.plugins.environments import XLAEnvironment -from lightning_fabric.utilities.data import has_len -from lightning_fabric.utilities.optimizer import _optimizers_to_device -from lightning_fabric.utilities.types import _PATH, ReduceOp -from pytorch_lightning.overrides.base import _LightningModuleWrapperBase -from pytorch_lightning.plugins.io.wrapper import _WrappingCheckpointIO -from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.ddp_spawn import DDPSpawnStrategy -from pytorch_lightning.strategies.launchers.xla import _XLALauncher -from pytorch_lightning.strategies.strategy import TBroadcast -from pytorch_lightning.trainer.connectors.data_connector import DataConnector -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities import find_shared_parameters, set_shared_parameters -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_zero_only -from pytorch_lightning.utilities.types import EVAL_DATALOADERS, STEP_OUTPUT, TRAIN_DATALOADERS +import lightning.pytorch as pl +from lightning.fabric.accelerators.tpu import _XLA_AVAILABLE +from lightning.fabric.plugins import CheckpointIO, XLACheckpointIO +from lightning.fabric.plugins.environments import XLAEnvironment +from lightning.fabric.utilities.data import has_len +from lightning.fabric.utilities.optimizer import _optimizers_to_device +from lightning.fabric.utilities.types import _PATH, ReduceOp +from lightning.pytorch.overrides.base import _LightningModuleWrapperBase +from lightning.pytorch.plugins.io.wrapper import _WrappingCheckpointIO +from lightning.pytorch.plugins.precision import PrecisionPlugin +from lightning.pytorch.strategies.ddp_spawn import DDPSpawnStrategy +from lightning.pytorch.strategies.launchers.xla import _XLALauncher +from lightning.pytorch.strategies.strategy import TBroadcast +from lightning.pytorch.trainer.connectors.data_connector import DataConnector +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities import find_shared_parameters, set_shared_parameters +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.rank_zero import rank_zero_only +from lightning.pytorch.utilities.types import EVAL_DATALOADERS, STEP_OUTPUT, TRAIN_DATALOADERS if TYPE_CHECKING and _XLA_AVAILABLE: from torch_xla.distributed.parallel_loader import MpDeviceLoader diff --git a/src/pytorch_lightning/strategies/utils.py b/src/lightning/pytorch/strategies/utils.py 
similarity index 85% rename from src/pytorch_lightning/strategies/utils.py rename to src/lightning/pytorch/strategies/utils.py index dce384353d..bfd5eba682 100644 --- a/src/pytorch_lightning/strategies/utils.py +++ b/src/lightning/pytorch/strategies/utils.py @@ -18,10 +18,10 @@ from typing import Literal import torch from torch import Tensor -from lightning_fabric.plugins.precision.utils import _convert_fp_tensor -from lightning_fabric.strategies import _StrategyRegistry -from lightning_fabric.utilities.registry import _is_register_method_overridden -from pytorch_lightning.strategies.strategy import Strategy +from lightning.fabric.plugins.precision.utils import _convert_fp_tensor +from lightning.fabric.strategies import _StrategyRegistry +from lightning.fabric.utilities.registry import _is_register_method_overridden +from lightning.pytorch.strategies.strategy import Strategy def _call_register_strategies(registry: _StrategyRegistry, base_module: str) -> None: diff --git a/src/pytorch_lightning/trainer/__init__.py b/src/lightning/pytorch/trainer/__init__.py similarity index 84% rename from src/pytorch_lightning/trainer/__init__.py rename to src/lightning/pytorch/trainer/__init__.py index 84456d836d..14acb3e8a9 100644 --- a/src/pytorch_lightning/trainer/__init__.py +++ b/src/lightning/pytorch/trainer/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. """""" -from lightning_fabric.utilities.seed import seed_everything -from pytorch_lightning.trainer.trainer import Trainer +from lightning.fabric.utilities.seed import seed_everything +from lightning.pytorch.trainer.trainer import Trainer __all__ = ["Trainer", "seed_everything"] diff --git a/src/pytorch_lightning/trainer/call.py b/src/lightning/pytorch/trainer/call.py similarity index 91% rename from src/pytorch_lightning/trainer/call.py rename to src/lightning/pytorch/trainer/call.py index 8ba6dcfd4b..d7589b77cd 100644 --- a/src/pytorch_lightning/trainer/call.py +++ b/src/lightning/pytorch/trainer/call.py @@ -13,10 +13,10 @@ # limitations under the License. from typing import Any, Callable -import pytorch_lightning as pl -from pytorch_lightning.trainer.states import TrainerStatus -from pytorch_lightning.utilities.exceptions import _TunerExitException -from pytorch_lightning.utilities.rank_zero import rank_zero_warn +import lightning.pytorch as pl +from lightning.pytorch.trainer.states import TrainerStatus +from lightning.pytorch.utilities.exceptions import _TunerExitException +from lightning.pytorch.utilities.rank_zero import rank_zero_warn def _call_and_handle_interrupt(trainer: "pl.Trainer", trainer_fn: Callable, *args: Any, **kwargs: Any) -> Any: diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/lightning/pytorch/trainer/configuration_validator.py similarity index 93% rename from src/pytorch_lightning/trainer/configuration_validator.py rename to src/lightning/pytorch/trainer/configuration_validator.py index ab27c49e01..9f33e269fb 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/lightning/pytorch/trainer/configuration_validator.py @@ -12,15 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytorch_lightning as pl -from lightning_fabric.utilities.warnings import PossibleUserWarning -from pytorch_lightning.accelerators.ipu import IPUAccelerator -from pytorch_lightning.strategies import DataParallelStrategy -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_warn -from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature +import lightning.pytorch as pl +from lightning.fabric.utilities.warnings import PossibleUserWarning +from lightning.pytorch.accelerators.ipu import IPUAccelerator +from lightning.pytorch.strategies import DataParallelStrategy +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import rank_zero_warn +from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature def verify_loop_configurations(trainer: "pl.Trainer") -> None: diff --git a/src/pytorch_lightning/trainer/connectors/__init__.py b/src/lightning/pytorch/trainer/connectors/__init__.py similarity index 100% rename from src/pytorch_lightning/trainer/connectors/__init__.py rename to src/lightning/pytorch/trainer/connectors/__init__.py diff --git a/src/pytorch_lightning/trainer/connectors/accelerator_connector.py b/src/lightning/pytorch/trainer/connectors/accelerator_connector.py similarity index 96% rename from src/pytorch_lightning/trainer/connectors/accelerator_connector.py rename to src/lightning/pytorch/trainer/connectors/accelerator_connector.py index 3b5c22c16a..5a1cf95cfa 100644 --- a/src/pytorch_lightning/trainer/connectors/accelerator_connector.py +++ b/src/lightning/pytorch/trainer/connectors/accelerator_connector.py @@ -20,7 +20,7 @@ from typing import cast, Dict, List, Literal, Optional, Union import torch from typing_extensions import get_args -from lightning_fabric.plugins.environments import ( +from lightning.fabric.plugins.environments import ( ClusterEnvironment, KubeflowEnvironment, LightningEnvironment, @@ -28,16 +28,16 @@ from lightning_fabric.plugins.environments import ( SLURMEnvironment, TorchElasticEnvironment, ) -from lightning_fabric.utilities.device_parser import _determine_root_gpu_device -from lightning_fabric.utilities.imports import _IS_INTERACTIVE, _TORCH_GREATER_EQUAL_1_11 -from pytorch_lightning.accelerators import AcceleratorRegistry -from pytorch_lightning.accelerators.accelerator import Accelerator -from pytorch_lightning.accelerators.cuda import CUDAAccelerator -from pytorch_lightning.accelerators.hpu import HPUAccelerator -from pytorch_lightning.accelerators.ipu import IPUAccelerator -from pytorch_lightning.accelerators.mps import MPSAccelerator -from pytorch_lightning.accelerators.tpu import TPUAccelerator -from pytorch_lightning.plugins import ( +from lightning.fabric.utilities.device_parser import _determine_root_gpu_device +from lightning.fabric.utilities.imports import _IS_INTERACTIVE, _TORCH_GREATER_EQUAL_1_11 +from lightning.pytorch.accelerators import AcceleratorRegistry +from lightning.pytorch.accelerators.accelerator import Accelerator +from lightning.pytorch.accelerators.cuda import CUDAAccelerator +from lightning.pytorch.accelerators.hpu import HPUAccelerator +from lightning.pytorch.accelerators.ipu 
import IPUAccelerator +from lightning.pytorch.accelerators.mps import MPSAccelerator +from lightning.pytorch.accelerators.tpu import TPUAccelerator +from lightning.pytorch.plugins import ( CheckpointIO, ColossalAIPrecisionPlugin, DeepSpeedPrecisionPlugin, @@ -50,10 +50,10 @@ from pytorch_lightning.plugins import ( TPUBf16PrecisionPlugin, TPUPrecisionPlugin, ) -from pytorch_lightning.plugins.environments import BaguaEnvironment -from pytorch_lightning.plugins.layer_sync import LayerSync, TorchSyncBatchNorm -from pytorch_lightning.plugins.precision.fsdp import FSDPMixedPrecisionPlugin -from pytorch_lightning.strategies import ( +from lightning.pytorch.plugins.environments import BaguaEnvironment +from lightning.pytorch.plugins.layer_sync import LayerSync, TorchSyncBatchNorm +from lightning.pytorch.plugins.precision.fsdp import FSDPMixedPrecisionPlugin +from lightning.pytorch.strategies import ( ColossalAIStrategy, DDPSpawnStrategy, DDPStrategy, @@ -69,10 +69,10 @@ from pytorch_lightning.strategies import ( StrategyRegistry, TPUSpawnStrategy, ) -from pytorch_lightning.strategies.ddp_spawn import _DDP_FORK_ALIASES -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.imports import _IPU_AVAILABLE -from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn +from lightning.pytorch.strategies.ddp_spawn import _DDP_FORK_ALIASES +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.imports import _IPU_AVAILABLE +from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_warn log = logging.getLogger(__name__) diff --git a/src/pytorch_lightning/trainer/connectors/callback_connector.py b/src/lightning/pytorch/trainer/connectors/callback_connector.py similarity index 94% rename from src/pytorch_lightning/trainer/connectors/callback_connector.py rename to src/lightning/pytorch/trainer/connectors/callback_connector.py index 3c56901c1d..dafbe202a4 100644 --- a/src/pytorch_lightning/trainer/connectors/callback_connector.py +++ b/src/lightning/pytorch/trainer/connectors/callback_connector.py @@ -17,8 +17,8 @@ import os from datetime import timedelta from typing import Dict, List, Optional, Sequence, Union -import pytorch_lightning as pl -from pytorch_lightning.callbacks import ( +import lightning.pytorch as pl +from lightning.pytorch.callbacks import ( Callback, Checkpoint, GradientAccumulationScheduler, @@ -28,14 +28,14 @@ from pytorch_lightning.callbacks import ( RichProgressBar, TQDMProgressBar, ) -from pytorch_lightning.callbacks.batch_size_finder import BatchSizeFinder -from pytorch_lightning.callbacks.lr_finder import LearningRateFinder -from pytorch_lightning.callbacks.rich_model_summary import RichModelSummary -from pytorch_lightning.callbacks.timer import Timer -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.imports import _PYTHON_GREATER_EQUAL_3_8_0, _PYTHON_GREATER_EQUAL_3_10_0 -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_info +from lightning.pytorch.callbacks.batch_size_finder import BatchSizeFinder +from lightning.pytorch.callbacks.lr_finder import LearningRateFinder +from lightning.pytorch.callbacks.rich_model_summary import RichModelSummary +from lightning.pytorch.callbacks.timer import Timer +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from 
lightning.pytorch.utilities.imports import _PYTHON_GREATER_EQUAL_3_8_0, _PYTHON_GREATER_EQUAL_3_10_0 +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import rank_zero_info _log = logging.getLogger(__name__) @@ -196,7 +196,7 @@ class CallbackConnector: If a callback returned by the model's configure_callback method has the same type as one or several callbacks already present in the trainer callbacks list, it will replace them. - In addition, all :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callbacks + In addition, all :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` callbacks will be pushed to the end of the list, ensuring they run last. """ model_callbacks = self.trainer._call_lightning_module_hook("configure_callbacks") @@ -256,7 +256,7 @@ def _configure_external_callbacks() -> List[Callback]: Return: A list of all callbacks collected from external factories. """ - group = "pytorch_lightning.callbacks_factory" + group = "lightning.pytorch.callbacks_factory" if _PYTHON_GREATER_EQUAL_3_8_0: from importlib.metadata import entry_points diff --git a/src/pytorch_lightning/trainer/connectors/checkpoint_connector.py b/src/lightning/pytorch/trainer/connectors/checkpoint_connector.py similarity index 96% rename from src/pytorch_lightning/trainer/connectors/checkpoint_connector.py rename to src/lightning/pytorch/trainer/connectors/checkpoint_connector.py index 9e0d2ed215..68a86d91e8 100644 --- a/src/pytorch_lightning/trainer/connectors/checkpoint_connector.py +++ b/src/lightning/pytorch/trainer/connectors/checkpoint_connector.py @@ -24,18 +24,18 @@ from fsspec.implementations.local import LocalFileSystem from torch import Tensor from torchmetrics import Metric -import pytorch_lightning as pl -from lightning_fabric.plugins.environments.slurm import SLURMEnvironment -from lightning_fabric.utilities.cloud_io import get_filesystem -from lightning_fabric.utilities.types import _PATH -from pytorch_lightning.callbacks import ModelCheckpoint -from pytorch_lightning.plugins.precision import MixedPrecisionPlugin -from pytorch_lightning.trainer.states import TrainerFn -from pytorch_lightning.utilities import _OMEGACONF_AVAILABLE -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.migration import pl_legacy_patch -from pytorch_lightning.utilities.migration.utils import _pl_migrate_checkpoint -from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn +import lightning.pytorch as pl +from lightning.fabric.plugins.environments.slurm import SLURMEnvironment +from lightning.fabric.utilities.cloud_io import get_filesystem +from lightning.fabric.utilities.types import _PATH +from lightning.pytorch.callbacks import ModelCheckpoint +from lightning.pytorch.plugins.precision import MixedPrecisionPlugin +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities import _OMEGACONF_AVAILABLE +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.migration import pl_legacy_patch +from lightning.pytorch.utilities.migration.utils import _pl_migrate_checkpoint +from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_warn if _OMEGACONF_AVAILABLE: from omegaconf import Container @@ -122,7 +122,7 @@ class CheckpointConnector: if ckpt_path is None and SLURMEnvironment.detect() and self._hpc_resume_path is not None: ckpt_path = "hpc" - 
from pytorch_lightning.callbacks.on_exception_checkpoint import OnExceptionCheckpoint + from lightning.pytorch.callbacks.on_exception_checkpoint import OnExceptionCheckpoint ft_checkpoints = [cb for cb in self.trainer.callbacks if isinstance(cb, OnExceptionCheckpoint)] fn = state_fn.value @@ -327,7 +327,7 @@ class CheckpointConnector: if callback_states is None: return - from pytorch_lightning.callbacks.quantization import QuantizationAwareTraining # avoid circular import + from lightning.pytorch.callbacks.quantization import QuantizationAwareTraining # avoid circular import for callback in self.trainer.callbacks: if not isinstance(callback, QuantizationAwareTraining): diff --git a/src/pytorch_lightning/trainer/connectors/data_connector.py b/src/lightning/pytorch/trainer/connectors/data_connector.py similarity index 95% rename from src/pytorch_lightning/trainer/connectors/data_connector.py rename to src/lightning/pytorch/trainer/connectors/data_connector.py index 271d4dd2c2..0a1cf668a6 100644 --- a/src/pytorch_lightning/trainer/connectors/data_connector.py +++ b/src/lightning/pytorch/trainer/connectors/data_connector.py @@ -21,20 +21,20 @@ from lightning_utilities.core.apply_func import apply_to_collection from torch.utils.data import BatchSampler, DataLoader, Sampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler -import pytorch_lightning as pl -from lightning_fabric.utilities.data import _auto_add_worker_init_fn, _replace_dunder_methods, has_iterable_dataset -from lightning_fabric.utilities.distributed import DistributedSamplerWrapper -from pytorch_lightning.accelerators.ipu import IPUAccelerator -from pytorch_lightning.overrides.distributed import UnrepeatedDistributedSamplerWrapper -from pytorch_lightning.strategies import DDPSpawnStrategy -from pytorch_lightning.trainer.states import RunningStage, TrainerFn -from pytorch_lightning.trainer.supporters import CombinedLoader, CycleIterator -from pytorch_lightning.utilities.data import _is_dataloader_shuffled, _update_dataloader, has_len_all_ranks -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_warn, WarningCache -from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS -from pytorch_lightning.utilities.warnings import PossibleUserWarning +import lightning.pytorch as pl +from lightning.fabric.utilities.data import _auto_add_worker_init_fn, _replace_dunder_methods, has_iterable_dataset +from lightning.fabric.utilities.distributed import DistributedSamplerWrapper +from lightning.pytorch.accelerators.ipu import IPUAccelerator +from lightning.pytorch.overrides.distributed import UnrepeatedDistributedSamplerWrapper +from lightning.pytorch.strategies import DDPSpawnStrategy +from lightning.pytorch.trainer.states import RunningStage, TrainerFn +from lightning.pytorch.trainer.supporters import CombinedLoader, CycleIterator +from lightning.pytorch.utilities.data import _is_dataloader_shuffled, _update_dataloader, has_len_all_ranks +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import rank_zero_warn, WarningCache +from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS +from lightning.pytorch.utilities.warnings import PossibleUserWarning warning_cache = WarningCache() 
@@ -489,8 +489,8 @@ class _DataLoaderSource: The source can be - 1. from a ``*_datalaoder()`` method on the :class:`~pytorch_lightning.core.module.LightningModule`, - 2. from a ``*_datalaoder()`` method on the :class:`~pytorch_lightning.core.datamodule.LightningDataModule`, + 1. from a ``*_datalaoder()`` method on the :class:`~lightning.pytorch.core.module.LightningModule`, + 2. from a ``*_datalaoder()`` method on the :class:`~lightning.pytorch.core.datamodule.LightningDataModule`, 3. a direct instance of a :class:`~torch.utils.data.DataLoader` or supported collections thereof. Arguments: @@ -538,8 +538,8 @@ class _DataHookSelector: The hook source can be: - 1. the :class:`~pytorch_lightning.core.module.LightningModule`, - 2. the :class:`~pytorch_lightning.core.datamodule.LightningDataModule`, + 1. the :class:`~lightning.pytorch.core.module.LightningModule`, + 2. the :class:`~lightning.pytorch.core.datamodule.LightningDataModule`, Arguments: model: A ``LightningModule`` diff --git a/src/lightning/pytorch/trainer/connectors/logger_connector/__init__.py b/src/lightning/pytorch/trainer/connectors/logger_connector/__init__.py new file mode 100644 index 0000000000..9566d03b98 --- /dev/null +++ b/src/lightning/pytorch/trainer/connectors/logger_connector/__init__.py @@ -0,0 +1 @@ +from lightning.pytorch.trainer.connectors.logger_connector.logger_connector import LoggerConnector # noqa: F401 diff --git a/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py b/src/lightning/pytorch/trainer/connectors/logger_connector/fx_validator.py similarity index 99% rename from src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py rename to src/lightning/pytorch/trainer/connectors/logger_connector/fx_validator.py index 5f7b8660bb..5790f9dbe0 100644 --- a/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py +++ b/src/lightning/pytorch/trainer/connectors/logger_connector/fx_validator.py @@ -15,7 +15,7 @@ from typing import Optional, Tuple, Union from typing_extensions import TypedDict -from pytorch_lightning.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.exceptions import MisconfigurationException class _FxValidator: diff --git a/src/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/src/lightning/pytorch/trainer/connectors/logger_connector/logger_connector.py similarity index 95% rename from src/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py rename to src/lightning/pytorch/trainer/connectors/logger_connector/logger_connector.py index f90822a19f..ebc03d64af 100644 --- a/src/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/src/lightning/pytorch/trainer/connectors/logger_connector/logger_connector.py @@ -17,13 +17,13 @@ from lightning_utilities.core.apply_func import apply_to_collection from lightning_utilities.core.rank_zero import WarningCache from torch import Tensor -import pytorch_lightning as pl -from lightning_fabric.loggers.tensorboard import _TENSORBOARD_AVAILABLE, _TENSORBOARDX_AVAILABLE -from lightning_fabric.plugins.environments import SLURMEnvironment -from lightning_fabric.utilities import move_data_to_device -from lightning_fabric.utilities.apply_func import convert_tensors_to_scalars -from pytorch_lightning.loggers import CSVLogger, Logger, TensorBoardLogger -from pytorch_lightning.trainer.connectors.logger_connector.result import _METRICS, _OUT_DICT, _PBAR_DICT +import lightning.pytorch as pl +from 
lightning.fabric.loggers.tensorboard import _TENSORBOARD_AVAILABLE, _TENSORBOARDX_AVAILABLE +from lightning.fabric.plugins.environments import SLURMEnvironment +from lightning.fabric.utilities import move_data_to_device +from lightning.fabric.utilities.apply_func import convert_tensors_to_scalars +from lightning.pytorch.loggers import CSVLogger, Logger, TensorBoardLogger +from lightning.pytorch.trainer.connectors.logger_connector.result import _METRICS, _OUT_DICT, _PBAR_DICT warning_cache = WarningCache() @@ -62,7 +62,7 @@ class LoggerConnector: logger_ = TensorBoardLogger(save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id()) else: warning_cache.warn( - "Starting from v1.9.0, `tensorboardX` has been removed as a dependency of the `pytorch_lightning`" + "Starting from v1.9.0, `tensorboardX` has been removed as a dependency of the `lightning.pytorch`" " package, due to potential conflicts with other packages in the ML ecosystem. For this reason," " `logger=True` will use `CSVLogger` as the default logger, unless the `tensorboard`" " or `tensorboardX` packages are found." diff --git a/src/pytorch_lightning/trainer/connectors/logger_connector/result.py b/src/lightning/pytorch/trainer/connectors/logger_connector/result.py similarity index 96% rename from src/pytorch_lightning/trainer/connectors/logger_connector/result.py rename to src/lightning/pytorch/trainer/connectors/logger_connector/result.py index 1ce9328291..de9c3b77d0 100644 --- a/src/pytorch_lightning/trainer/connectors/logger_connector/result.py +++ b/src/lightning/pytorch/trainer/connectors/logger_connector/result.py @@ -21,15 +21,15 @@ from torch import Tensor from torchmetrics import Metric from typing_extensions import TypedDict -from lightning_fabric.utilities import move_data_to_device -from lightning_fabric.utilities.apply_func import convert_tensors_to_scalars -from lightning_fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin -from lightning_fabric.utilities.distributed import _distributed_available -from pytorch_lightning.utilities.data import extract_batch_size -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.memory import recursive_detach -from pytorch_lightning.utilities.rank_zero import rank_zero_warn, WarningCache -from pytorch_lightning.utilities.warnings import PossibleUserWarning +from lightning.fabric.utilities import move_data_to_device +from lightning.fabric.utilities.apply_func import convert_tensors_to_scalars +from lightning.fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin +from lightning.fabric.utilities.distributed import _distributed_available +from lightning.pytorch.utilities.data import extract_batch_size +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.memory import recursive_detach +from lightning.pytorch.utilities.rank_zero import rank_zero_warn, WarningCache +from lightning.pytorch.utilities.warnings import PossibleUserWarning _IN_METRIC = Union[Metric, Tensor] # Do not include scalars as they were converted to tensors _OUT_METRIC = Union[Tensor, Dict[str, Tensor]] @@ -182,7 +182,7 @@ class _Metadata: class _ResultMetric(Metric, _DeviceDtypeModuleMixin): - """Wraps the value provided to `:meth:`~pytorch_lightning.core.module.LightningModule.log`""" + """Wraps the value provided to `:meth:`~lightning.pytorch.core.module.LightningModule.log`""" def __init__(self, metadata: _Metadata, is_tensor: bool) -> None: super().__init__() 
@@ -297,7 +297,7 @@ class _ResultMetric(Metric, _DeviceDtypeModuleMixin): class _ResultCollection(dict): """Collection (dictionary) of - :class:`~pytorch_lightning.trainer.connectors.logger_connector.result._ResultMetric` + :class:`~lightning.pytorch.trainer.connectors.logger_connector.result._ResultMetric` Example: @@ -358,7 +358,7 @@ class _ResultCollection(dict): metric_attribute: Optional[str] = None, rank_zero_only: bool = False, ) -> None: - """See :meth:`~pytorch_lightning.core.module.LightningModule.log`""" + """See :meth:`~lightning.pytorch.core.module.LightningModule.log`""" # no metrics should be logged with graphs if not enable_graph: value = recursive_detach(value) diff --git a/src/pytorch_lightning/trainer/connectors/signal_connector.py b/src/lightning/pytorch/trainer/connectors/signal_connector.py similarity index 96% rename from src/pytorch_lightning/trainer/connectors/signal_connector.py rename to src/lightning/pytorch/trainer/connectors/signal_connector.py index 3897d326e7..4d74279918 100644 --- a/src/pytorch_lightning/trainer/connectors/signal_connector.py +++ b/src/lightning/pytorch/trainer/connectors/signal_connector.py @@ -9,11 +9,11 @@ from typing import Any, Callable, Dict, List, Set, Union from lightning_utilities.core.rank_zero import rank_prefixed_message -import pytorch_lightning as pl -from lightning_fabric.plugins.environments import SLURMEnvironment -from lightning_fabric.utilities.imports import _IS_WINDOWS -from pytorch_lightning.utilities.imports import _PYTHON_GREATER_EQUAL_3_8_0 -from pytorch_lightning.utilities.rank_zero import rank_zero_info +import lightning.pytorch as pl +from lightning.fabric.plugins.environments import SLURMEnvironment +from lightning.fabric.utilities.imports import _IS_WINDOWS +from lightning.pytorch.utilities.imports import _PYTHON_GREATER_EQUAL_3_8_0 +from lightning.pytorch.utilities.rank_zero import rank_zero_info # copied from signal.pyi _SIGNUM = Union[int, signal.Signals] diff --git a/src/pytorch_lightning/trainer/setup.py b/src/lightning/pytorch/trainer/setup.py similarity index 95% rename from src/pytorch_lightning/trainer/setup.py rename to src/lightning/pytorch/trainer/setup.py index f24e812593..222dc38040 100644 --- a/src/pytorch_lightning/trainer/setup.py +++ b/src/lightning/pytorch/trainer/setup.py @@ -15,17 +15,17 @@ from typing import Optional, Union -import pytorch_lightning as pl -from lightning_fabric.utilities.warnings import PossibleUserWarning -from pytorch_lightning.accelerators import ( +import lightning.pytorch as pl +from lightning.fabric.utilities.warnings import PossibleUserWarning +from lightning.pytorch.accelerators import ( CUDAAccelerator, HPUAccelerator, IPUAccelerator, MPSAccelerator, TPUAccelerator, ) -from pytorch_lightning.loggers.logger import DummyLogger -from pytorch_lightning.profilers import ( +from lightning.pytorch.loggers.logger import DummyLogger +from lightning.pytorch.profilers import ( AdvancedProfiler, PassThroughProfiler, Profiler, @@ -33,9 +33,9 @@ from pytorch_lightning.profilers import ( SimpleProfiler, XLAProfiler, ) -from pytorch_lightning.utilities import _HPU_AVAILABLE, _IPU_AVAILABLE -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn +from lightning.pytorch.utilities import _HPU_AVAILABLE, _IPU_AVAILABLE +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_warn def 
_init_debugging_flags( diff --git a/src/pytorch_lightning/trainer/states.py b/src/lightning/pytorch/trainer/states.py similarity index 84% rename from src/pytorch_lightning/trainer/states.py rename to src/lightning/pytorch/trainer/states.py index 44a330a0c8..3f47896642 100644 --- a/src/pytorch_lightning/trainer/states.py +++ b/src/lightning/pytorch/trainer/states.py @@ -14,11 +14,11 @@ from dataclasses import dataclass from typing import Optional -from pytorch_lightning.utilities import LightningEnum +from lightning.pytorch.utilities import LightningEnum class TrainerStatus(LightningEnum): - """Enum for the status of the :class:`~pytorch_lightning.trainer.trainer.Trainer`""" + """Enum for the status of the :class:`~lightning.pytorch.trainer.trainer.Trainer`""" INITIALIZING = "initializing" # trainer creation RUNNING = "running" @@ -32,9 +32,9 @@ class TrainerStatus(LightningEnum): class TrainerFn(LightningEnum): """ - Enum for the user-facing functions of the :class:`~pytorch_lightning.trainer.trainer.Trainer` - such as :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit` and - :meth:`~pytorch_lightning.trainer.trainer.Trainer.test`. + Enum for the user-facing functions of the :class:`~lightning.pytorch.trainer.trainer.Trainer` + such as :meth:`~lightning.pytorch.trainer.trainer.Trainer.fit` and + :meth:`~lightning.pytorch.trainer.trainer.Trainer.test`. """ FITTING = "fit" @@ -76,7 +76,7 @@ class RunningStage(LightningEnum): @dataclass class TrainerState: - """Dataclass to encapsulate the current :class:`~pytorch_lightning.trainer.trainer.Trainer` state.""" + """Dataclass to encapsulate the current :class:`~lightning.pytorch.trainer.trainer.Trainer` state.""" status: TrainerStatus = TrainerStatus.INITIALIZING fn: Optional[TrainerFn] = None diff --git a/src/pytorch_lightning/trainer/supporters.py b/src/lightning/pytorch/trainer/supporters.py similarity index 98% rename from src/pytorch_lightning/trainer/supporters.py rename to src/lightning/pytorch/trainer/supporters.py index 2310f71375..31de35b31e 100644 --- a/src/pytorch_lightning/trainer/supporters.py +++ b/src/lightning/pytorch/trainer/supporters.py @@ -21,7 +21,7 @@ from torch.utils.data import Dataset from torch.utils.data.dataloader import _BaseDataLoaderIter, _MultiProcessingDataLoaderIter, DataLoader from torch.utils.data.dataset import IterableDataset -from pytorch_lightning.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.exceptions import MisconfigurationException @dataclass @@ -280,7 +280,7 @@ class CombinedLoader: Returns: the wrapped loaders """ - from pytorch_lightning.utilities.data import get_len + from lightning.pytorch.utilities.data import get_len all_lengths = apply_to_collection(self.loaders, Iterable, get_len, wrong_dtype=(Sequence, Mapping)) @@ -298,7 +298,7 @@ class CombinedLoader: def _apply_cycle_iterator_length(self) -> None: """When the model is `max_size_cycle`, compute the length across all ``CycleIterator`` and re-assign it to all dataloaders.""" - from pytorch_lightning.utilities.data import get_len + from lightning.pytorch.utilities.data import get_len if self.mode != "max_size_cycle": return @@ -334,7 +334,7 @@ class CombinedLoader: Returns: length: the minimum length of loaders """ - from pytorch_lightning.utilities.data import get_len + from lightning.pytorch.utilities.data import get_len all_lengths = apply_to_collection(loaders, Iterable, get_len, wrong_dtype=(Sequence, Mapping)) diff --git a/src/pytorch_lightning/trainer/trainer.py 
b/src/lightning/pytorch/trainer/trainer.py similarity index 95% rename from src/pytorch_lightning/trainer/trainer.py rename to src/lightning/pytorch/trainer/trainer.py index 29eeb8e4ed..58146fcd25 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/lightning/pytorch/trainer/trainer.py @@ -41,56 +41,56 @@ from torch import Tensor from torch.optim import Optimizer from torch.utils.data import DataLoader -import pytorch_lightning as pl -from lightning_fabric.utilities.apply_func import convert_tensors_to_scalars -from lightning_fabric.utilities.cloud_io import get_filesystem -from lightning_fabric.utilities.data import _auto_add_worker_init_fn -from lightning_fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0 -from lightning_fabric.utilities.types import _PATH -from lightning_fabric.utilities.warnings import PossibleUserWarning -from pytorch_lightning.accelerators import Accelerator, TPUAccelerator -from pytorch_lightning.callbacks import Callback, Checkpoint, EarlyStopping, ProgressBarBase -from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter -from pytorch_lightning.core.datamodule import LightningDataModule -from pytorch_lightning.loggers import Logger -from pytorch_lightning.loggers.tensorboard import TensorBoardLogger -from pytorch_lightning.loops import _PredictionLoop, _TrainingEpochLoop -from pytorch_lightning.loops.dataloader.evaluation_loop import _EvaluationLoop -from pytorch_lightning.loops.fit_loop import _FitLoop -from pytorch_lightning.loops.utilities import _parse_loop_limits, _reset_progress -from pytorch_lightning.plugins import PLUGIN_INPUT, PrecisionPlugin -from pytorch_lightning.profilers import Profiler -from pytorch_lightning.strategies import DDPStrategy, FSDPStrategy, ParallelStrategy, SingleDeviceStrategy, Strategy -from pytorch_lightning.trainer import call, setup -from pytorch_lightning.trainer.configuration_validator import verify_loop_configurations -from pytorch_lightning.trainer.connectors.accelerator_connector import ( +import lightning.pytorch as pl +from lightning.fabric.utilities.apply_func import convert_tensors_to_scalars +from lightning.fabric.utilities.cloud_io import get_filesystem +from lightning.fabric.utilities.data import _auto_add_worker_init_fn +from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_0 +from lightning.fabric.utilities.types import _PATH +from lightning.fabric.utilities.warnings import PossibleUserWarning +from lightning.pytorch.accelerators import Accelerator, TPUAccelerator +from lightning.pytorch.callbacks import Callback, Checkpoint, EarlyStopping, ProgressBarBase +from lightning.pytorch.callbacks.prediction_writer import BasePredictionWriter +from lightning.pytorch.core.datamodule import LightningDataModule +from lightning.pytorch.loggers import Logger +from lightning.pytorch.loggers.tensorboard import TensorBoardLogger +from lightning.pytorch.loops import _PredictionLoop, _TrainingEpochLoop +from lightning.pytorch.loops.dataloader.evaluation_loop import _EvaluationLoop +from lightning.pytorch.loops.fit_loop import _FitLoop +from lightning.pytorch.loops.utilities import _parse_loop_limits, _reset_progress +from lightning.pytorch.plugins import PLUGIN_INPUT, PrecisionPlugin +from lightning.pytorch.profilers import Profiler +from lightning.pytorch.strategies import DDPStrategy, FSDPStrategy, ParallelStrategy, SingleDeviceStrategy, Strategy +from lightning.pytorch.trainer import call, setup +from lightning.pytorch.trainer.configuration_validator import 
verify_loop_configurations +from lightning.pytorch.trainer.connectors.accelerator_connector import ( _LITERAL_WARN, _PRECISION_INPUT, _PRECISION_INPUT_STR, AcceleratorConnector, ) -from pytorch_lightning.trainer.connectors.callback_connector import CallbackConnector -from pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector -from pytorch_lightning.trainer.connectors.data_connector import DataConnector -from pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector -from pytorch_lightning.trainer.connectors.logger_connector.result import _OUT_DICT, _PBAR_DICT, _ResultCollection -from pytorch_lightning.trainer.connectors.signal_connector import SignalConnector -from pytorch_lightning.trainer.states import RunningStage, TrainerFn, TrainerState, TrainerStatus -from pytorch_lightning.trainer.supporters import CombinedLoader -from pytorch_lightning.utilities import GradClipAlgorithmType, parsing -from pytorch_lightning.utilities.argparse import ( +from lightning.pytorch.trainer.connectors.callback_connector import CallbackConnector +from lightning.pytorch.trainer.connectors.checkpoint_connector import CheckpointConnector +from lightning.pytorch.trainer.connectors.data_connector import DataConnector +from lightning.pytorch.trainer.connectors.logger_connector import LoggerConnector +from lightning.pytorch.trainer.connectors.logger_connector.result import _OUT_DICT, _PBAR_DICT, _ResultCollection +from lightning.pytorch.trainer.connectors.signal_connector import SignalConnector +from lightning.pytorch.trainer.states import RunningStage, TrainerFn, TrainerState, TrainerStatus +from lightning.pytorch.trainer.supporters import CombinedLoader +from lightning.pytorch.utilities import GradClipAlgorithmType, parsing +from lightning.pytorch.utilities.argparse import ( _defaults_from_env_vars, add_argparse_args, from_argparse_args, parse_argparser, parse_env_variables, ) -from pytorch_lightning.utilities.data import has_len_all_ranks -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn -from pytorch_lightning.utilities.seed import isolate_rng -from pytorch_lightning.utilities.types import ( +from lightning.pytorch.utilities.data import has_len_all_ranks +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.model_helpers import is_overridden +from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_warn +from lightning.pytorch.utilities.seed import isolate_rng +from lightning.pytorch.utilities.types import ( _EVALUATE_OUTPUT, _PREDICT_OUTPUT, EVAL_DATALOADERS, @@ -163,7 +163,7 @@ class Trainer: benchmark: The value (``True`` or ``False``) to set ``torch.backends.cudnn.benchmark`` to. The value for ``torch.backends.cudnn.benchmark`` set in the current session will be used - (``False`` if not manually set). If :paramref:`~pytorch_lightning.trainer.Trainer.deterministic` is set + (``False`` if not manually set). If :paramref:`~lightning.pytorch.trainer.Trainer.deterministic` is set to ``True``, this will default to ``False``. Override to manually set a different value. Default: ``None``. @@ -172,7 +172,7 @@ class Trainer: enable_checkpointing: If ``True``, enable checkpointing. 
It will configure a default ModelCheckpoint callback if there is no user-defined ModelCheckpoint in - :paramref:`~pytorch_lightning.trainer.trainer.Trainer.callbacks`. + :paramref:`~lightning.pytorch.trainer.trainer.Trainer.callbacks`. Default: ``True``. check_val_every_n_epoch: Perform a validation loop every after every `N` training epochs. If ``None``, @@ -474,7 +474,7 @@ class Trainer: model: Model to fit. train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a - :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples. + :class:`~lightning.pytorch.core.datamodule.LightningDataModule` specifying training samples. In the case of multiple dataloaders, please see this :ref:`section `. val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples. @@ -483,7 +483,7 @@ class Trainer: keywords ``"last"`` and ``"hpc"``. If there is no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint, training will start from the beginning of the next epoch. - datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`. + datamodule: An instance of :class:`~lightning.pytorch.core.datamodule.LightningDataModule`. """ model = self._maybe_unwrap_optimized(model) self.strategy._lightning_module = model @@ -548,7 +548,7 @@ class Trainer: model: The model to validate. dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them, - or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying validation samples. + or a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` specifying validation samples. ckpt_path: Either ``"best"``, ``"last"``, ``"hpc"`` or path to the checkpoint you wish to validate. If ``None`` and the model instance was passed, use the current weights. @@ -557,12 +557,12 @@ class Trainer: verbose: If True, prints the validation results. - datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`. + datamodule: An instance of :class:`~lightning.pytorch.core.datamodule.LightningDataModule`. Returns: List of dictionaries with metrics logged during the validation phase, e.g., in model- or callback hooks - like :meth:`~pytorch_lightning.core.module.LightningModule.validation_step`, - :meth:`~pytorch_lightning.core.module.LightningModule.validation_epoch_end`, etc. + like :meth:`~lightning.pytorch.core.module.LightningModule.validation_step`, + :meth:`~lightning.pytorch.core.module.LightningModule.validation_epoch_end`, etc. The length of the list corresponds to the number of validation dataloaders used. """ if model is None: @@ -641,7 +641,7 @@ class Trainer: model: The model to test. dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them, - or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying test samples. + or a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` specifying test samples. ckpt_path: Either ``"best"``, ``"last"``, ``"hpc"`` or path to the checkpoint you wish to test. If ``None`` and the model instance was passed, use the current weights. @@ -650,12 +650,12 @@ class Trainer: verbose: If True, prints the test results. - datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`. + datamodule: An instance of :class:`~lightning.pytorch.core.datamodule.LightningDataModule`. 
Returns: List of dictionaries with metrics logged during the test phase, e.g., in model- or callback hooks - like :meth:`~pytorch_lightning.core.module.LightningModule.test_step`, - :meth:`~pytorch_lightning.core.module.LightningModule.test_epoch_end`, etc. + like :meth:`~lightning.pytorch.core.module.LightningModule.test_step`, + :meth:`~lightning.pytorch.core.module.LightningModule.test_epoch_end`, etc. The length of the list corresponds to the number of test dataloaders used. """ if model is None: @@ -735,7 +735,7 @@ class Trainer: model: The model to predict with. dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them, - or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying prediction samples. + or a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` specifying prediction samples. datamodule: The datamodule with a predict_dataloader method that returns one or more dataloaders. @@ -1571,7 +1571,7 @@ class Trainer: """The LightningModule, but possibly wrapped into DataParallel or DistributedDataParallel. To access the pure LightningModule, use - :meth:`~pytorch_lightning.trainer.trainer.Trainer.lightning_module` instead. + :meth:`~lightning.pytorch.trainer.trainer.Trainer.lightning_module` instead. """ return self.strategy.model @@ -1626,39 +1626,39 @@ class Trainer: @property def early_stopping_callback(self) -> Optional[EarlyStopping]: - """The first :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback in the + """The first :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` callback in the Trainer.callbacks list, or ``None`` if it doesn't exist.""" callbacks = self.early_stopping_callbacks return callbacks[0] if len(callbacks) > 0 else None @property def early_stopping_callbacks(self) -> List[EarlyStopping]: - """A list of all instances of :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` found in + """A list of all instances of :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` found in the Trainer.callbacks list.""" return [c for c in self.callbacks if isinstance(c, EarlyStopping)] @property def prediction_writer_callbacks(self) -> List[BasePredictionWriter]: - """A list of all instances of :class:`~pytorch_lightning.callbacks.prediction_writer.BasePredictionWriter` + """A list of all instances of :class:`~lightning.pytorch.callbacks.prediction_writer.BasePredictionWriter` found in the Trainer.callbacks list.""" return [cb for cb in self.callbacks if isinstance(cb, BasePredictionWriter)] @property def checkpoint_callback(self) -> Optional[Checkpoint]: - """The first :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callback in the + """The first :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` callback in the Trainer.callbacks list, or ``None`` if it doesn't exist.""" callbacks = self.checkpoint_callbacks return callbacks[0] if len(callbacks) > 0 else None @property def checkpoint_callbacks(self) -> List[Checkpoint]: - """A list of all instances of :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` found + """A list of all instances of :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` found in the Trainer.callbacks list.""" return [c for c in self.callbacks if isinstance(c, Checkpoint)] @property def progress_bar_callback(self) -> Optional[ProgressBarBase]: - """An instance of :class:`~pytorch_lightning.callbacks.progress.base.ProgressBarBase` found in the + """An instance 
of :class:`~lightning.pytorch.callbacks.progress.base.ProgressBarBase` found in the Trainer.callbacks list, or ``None`` if one doesn't exist.""" for c in self.callbacks: if isinstance(c, ProgressBarBase): @@ -1667,10 +1667,10 @@ class Trainer: @property def ckpt_path(self) -> Optional[_PATH]: - """Set to the path/URL of a checkpoint loaded via :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`, - :meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`, - :meth:`~pytorch_lightning.trainer.trainer.Trainer.test`, or - :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`. ``None`` otherwise.""" + """Set to the path/URL of a checkpoint loaded via :meth:`~lightning.pytorch.trainer.trainer.Trainer.fit`, + :meth:`~lightning.pytorch.trainer.trainer.Trainer.validate`, + :meth:`~lightning.pytorch.trainer.trainer.Trainer.test`, or + :meth:`~lightning.pytorch.trainer.trainer.Trainer.predict`. ``None`` otherwise.""" return self._checkpoint_connector._ckpt_path @ckpt_path.setter @@ -1893,7 +1893,7 @@ class Trainer: @property def callback_metrics(self) -> Dict: # TODO: the true typing return can include dictionaries as defined in - # `pytorch_lightning.trainer.connectors.logger_connector.result._OUT_DICT` + # `lightning.pytorch.trainer.connectors.logger_connector.result._OUT_DICT` return self._logger_connector.callback_metrics @property diff --git a/src/pytorch_lightning/tuner/__init__.py b/src/lightning/pytorch/tuner/__init__.py similarity index 90% rename from src/pytorch_lightning/tuner/__init__.py rename to src/lightning/pytorch/tuner/__init__.py index dc816988a1..987bb8cb4f 100644 --- a/src/pytorch_lightning/tuner/__init__.py +++ b/src/lightning/pytorch/tuner/__init__.py @@ -11,4 +11,4 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from pytorch_lightning.tuner.tuning import Tuner # noqa: F401 +from lightning.pytorch.tuner.tuning import Tuner # noqa: F401 diff --git a/src/pytorch_lightning/tuner/batch_size_scaling.py b/src/lightning/pytorch/tuner/batch_size_scaling.py similarity index 96% rename from src/pytorch_lightning/tuner/batch_size_scaling.py rename to src/lightning/pytorch/tuner/batch_size_scaling.py index f6c2a3eead..e8d221dab0 100644 --- a/src/pytorch_lightning/tuner/batch_size_scaling.py +++ b/src/lightning/pytorch/tuner/batch_size_scaling.py @@ -17,10 +17,10 @@ import uuid from copy import deepcopy from typing import Any, Dict, Optional, Tuple -import pytorch_lightning as pl -from pytorch_lightning.utilities.memory import garbage_collection_cuda, is_oom_error -from pytorch_lightning.utilities.parsing import lightning_getattr, lightning_setattr -from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn +import lightning.pytorch as pl +from lightning.pytorch.utilities.memory import garbage_collection_cuda, is_oom_error +from lightning.pytorch.utilities.parsing import lightning_getattr, lightning_setattr +from lightning.pytorch.utilities.rank_zero import rank_zero_info, rank_zero_warn log = logging.getLogger(__name__) @@ -123,7 +123,7 @@ def __scale_batch_dump_params(trainer: "pl.Trainer") -> Dict[str, Any]: def __scale_batch_reset_params(trainer: "pl.Trainer", steps_per_trial: int) -> None: - from pytorch_lightning.loggers.logger import DummyLogger + from lightning.pytorch.loggers.logger import DummyLogger trainer.logger = DummyLogger() if trainer.logger is not None else None trainer.callbacks = [] @@ -282,7 +282,7 @@ def _adjust_batch_size( """Helper function for adjusting the batch size. Args: - trainer: instance of pytorch_lightning.Trainer + trainer: instance of lightning.pytorch.Trainer factor: value which the old batch size is multiplied by to get the new batch size value: if a value is given, will override the batch size with this value. 
@@ -301,7 +301,7 @@ def _adjust_batch_size( rank_zero_info(f"Batch size {batch_size} {desc}, trying batch size {new_size}") if trainer.state.fn == "fit": - from pytorch_lightning.trainer.supporters import CombinedLoader + from lightning.pytorch.trainer.supporters import CombinedLoader if trainer.train_dataloader is None: trainer.reset_train_dataloader() @@ -331,7 +331,7 @@ def _adjust_batch_size( def _is_valid_batch_size( batch_size: int, dataloader: "pl.trainer.supporters.CombinedLoader", trainer: "pl.Trainer" ) -> bool: - from pytorch_lightning.utilities.data import has_len_all_ranks + from lightning.pytorch.utilities.data import has_len_all_ranks module = trainer.lightning_module or trainer.datamodule has_len = has_len_all_ranks(dataloader, trainer.strategy, module) diff --git a/src/pytorch_lightning/tuner/lr_finder.py b/src/lightning/pytorch/tuner/lr_finder.py similarity index 97% rename from src/pytorch_lightning/tuner/lr_finder.py rename to src/lightning/pytorch/tuner/lr_finder.py index bdc11f9285..49f017012e 100644 --- a/src/pytorch_lightning/tuner/lr_finder.py +++ b/src/lightning/pytorch/tuner/lr_finder.py @@ -22,13 +22,13 @@ import numpy as np import torch from lightning_utilities.core.imports import RequirementCache -import pytorch_lightning as pl -from lightning_fabric.utilities.types import _TORCH_LRSCHEDULER -from pytorch_lightning.callbacks import Callback -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.parsing import lightning_hasattr, lightning_setattr -from pytorch_lightning.utilities.rank_zero import rank_zero_warn -from pytorch_lightning.utilities.types import LRScheduler, LRSchedulerConfig, STEP_OUTPUT +import lightning.pytorch as pl +from lightning.fabric.utilities.types import _TORCH_LRSCHEDULER +from lightning.pytorch.callbacks import Callback +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.parsing import lightning_hasattr, lightning_setattr +from lightning.pytorch.utilities.rank_zero import rank_zero_warn +from lightning.pytorch.utilities.types import LRScheduler, LRSchedulerConfig, STEP_OUTPUT # check if ipywidgets is installed before importing tqdm.auto # to ensure it won't fail and a progress bar is displayed @@ -106,7 +106,7 @@ class _LRFinder: # TODO: update docs here """Decorate `trainer.strategy.setup_optimizers` method such that it sets the user's originally specified optimizer together with a new scheduler that takes care of the learning rate search.""" - from pytorch_lightning.core.optimizer import _validate_optimizers_attached + from lightning.pytorch.core.optimizer import _validate_optimizers_attached optimizers = trainer.strategy.optimizers @@ -312,7 +312,7 @@ def __lr_finder_dump_params(trainer: "pl.Trainer") -> Dict[str, Any]: def __lr_finder_reset_params(trainer: "pl.Trainer", num_training: int, early_stop_threshold: Optional[float]) -> None: - from pytorch_lightning.loggers.logger import DummyLogger + from lightning.pytorch.loggers.logger import DummyLogger trainer.strategy.lr_scheduler_configs = [] # Use special lr logger callback diff --git a/src/pytorch_lightning/tuner/tuning.py b/src/lightning/pytorch/tuner/tuning.py similarity index 92% rename from src/pytorch_lightning/tuner/tuning.py rename to src/lightning/pytorch/tuner/tuning.py index 056bddf3a6..afe672925d 100644 --- a/src/pytorch_lightning/tuner/tuning.py +++ b/src/lightning/pytorch/tuner/tuning.py @@ -13,10 +13,10 @@ # limitations under the License. 
from typing import Literal, Optional, Union -import pytorch_lightning as pl -from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS +import lightning.pytorch as pl +from lightning.pytorch.callbacks.callback import Callback +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS class Tuner: @@ -45,12 +45,12 @@ class Tuner: Args: model: Model to tune. train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a - :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples. + :class:`~lightning.pytorch.core.datamodule.LightningDataModule` specifying training samples. In the case of multiple dataloaders, please see this :ref:`section `. val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples. dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying val/test/predict samples used for running tuner on validation/testing/prediction. - datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`. + datamodule: An instance of :class:`~lightning.pytorch.core.datamodule.LightningDataModule`. method: Method to run tuner on. It can be any of ``("fit", "validate", "test", "predict")``. mode: Search strategy to update the batch size: @@ -76,7 +76,7 @@ class Tuner: _check_scale_batch_size_configuration(self._trainer) # local import to avoid circular import - from pytorch_lightning.callbacks.batch_size_finder import BatchSizeFinder + from lightning.pytorch.callbacks.batch_size_finder import BatchSizeFinder batch_size_finder: Callback = BatchSizeFinder( mode=mode, @@ -123,12 +123,12 @@ class Tuner: Args: model: Model to tune. train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a - :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples. + :class:`~lightning.pytorch.core.datamodule.LightningDataModule` specifying training samples. In the case of multiple dataloaders, please see this :ref:`section `. val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples. dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying val/test/predict samples used for running tuner on validation/testing/prediction. - datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`. + datamodule: An instance of :class:`~lightning.pytorch.core.datamodule.LightningDataModule`. method: Method to run tuner on. It can be any of ``("fit", "validate", "test", "predict")``. 
min_lr: minimum learning rate to investigate max_lr: maximum learning rate to investigate @@ -157,7 +157,7 @@ class Tuner: _check_lr_find_configuration(self._trainer) # local import to avoid circular import - from pytorch_lightning.callbacks.lr_finder import LearningRateFinder + from lightning.pytorch.callbacks.lr_finder import LearningRateFinder lr_finder_callback: Callback = LearningRateFinder( min_lr=min_lr, @@ -205,7 +205,7 @@ def _check_tuner_configuration( def _check_lr_find_configuration(trainer: "pl.Trainer") -> None: # local import to avoid circular import - from pytorch_lightning.callbacks.lr_finder import LearningRateFinder + from lightning.pytorch.callbacks.lr_finder import LearningRateFinder configured_callbacks = [cb for cb in trainer.callbacks if isinstance(cb, LearningRateFinder)] if configured_callbacks: @@ -223,7 +223,7 @@ def _check_scale_batch_size_configuration(trainer: "pl.Trainer") -> None: ) # local import to avoid circular import - from pytorch_lightning.callbacks.batch_size_finder import BatchSizeFinder + from lightning.pytorch.callbacks.batch_size_finder import BatchSizeFinder configured_callbacks = [cb for cb in trainer.callbacks if isinstance(cb, BatchSizeFinder)] if configured_callbacks: diff --git a/src/pytorch_lightning/utilities/__init__.py b/src/lightning/pytorch/utilities/__init__.py similarity index 68% rename from src/pytorch_lightning/utilities/__init__.py rename to src/lightning/pytorch/utilities/__init__.py index 0f0d311dd8..14fb980e78 100644 --- a/src/pytorch_lightning/utilities/__init__.py +++ b/src/lightning/pytorch/utilities/__init__.py @@ -15,11 +15,11 @@ import numpy -from lightning_fabric.utilities import LightningEnum # noqa: F401 -from lightning_fabric.utilities import move_data_to_device # noqa: F401 -from pytorch_lightning.utilities.enums import GradClipAlgorithmType # noqa: F401 -from pytorch_lightning.utilities.grads import grad_norm # noqa: F401 -from pytorch_lightning.utilities.imports import ( # noqa: F401 +from lightning.fabric.utilities import LightningEnum # noqa: F401 +from lightning.fabric.utilities import move_data_to_device # noqa: F401 +from lightning.pytorch.utilities.enums import GradClipAlgorithmType # noqa: F401 +from lightning.pytorch.utilities.grads import grad_norm # noqa: F401 +from lightning.pytorch.utilities.imports import ( # noqa: F401 _HPU_AVAILABLE, _IPU_AVAILABLE, _OMEGACONF_AVAILABLE, @@ -27,9 +27,9 @@ from pytorch_lightning.utilities.imports import ( # noqa: F401 _TORCH_QUANTIZE_AVAILABLE, _TORCHVISION_AVAILABLE, ) -from pytorch_lightning.utilities.parameter_tying import find_shared_parameters, set_shared_parameters # noqa: F401 -from pytorch_lightning.utilities.parsing import AttributeDict, flatten_dict, is_picklable # noqa: F401 -from pytorch_lightning.utilities.rank_zero import ( # noqa: F401 +from lightning.pytorch.utilities.parameter_tying import find_shared_parameters, set_shared_parameters # noqa: F401 +from lightning.pytorch.utilities.parsing import AttributeDict, flatten_dict, is_picklable # noqa: F401 +from lightning.pytorch.utilities.rank_zero import ( # noqa: F401 rank_zero_deprecation, rank_zero_info, rank_zero_only, diff --git a/src/pytorch_lightning/utilities/argparse.py b/src/lightning/pytorch/utilities/argparse.py similarity index 96% rename from src/pytorch_lightning/utilities/argparse.py rename to src/lightning/pytorch/utilities/argparse.py index 1d4e9eec49..f4d35eea35 100644 --- a/src/pytorch_lightning/utilities/argparse.py +++ b/src/lightning/pytorch/utilities/argparse.py @@ -21,9 +21,9 @@ 
from contextlib import suppress from functools import wraps from typing import Any, Callable, cast, Dict, List, Tuple, Type, TypeVar, Union -import pytorch_lightning as pl -from pytorch_lightning.utilities.parsing import str_to_bool, str_to_bool_or_int, str_to_bool_or_str -from pytorch_lightning.utilities.types import _ADD_ARGPARSE_RETURN +import lightning.pytorch as pl +from lightning.pytorch.utilities.parsing import str_to_bool, str_to_bool_or_int, str_to_bool_or_str +from lightning.pytorch.utilities.types import _ADD_ARGPARSE_RETURN _T = TypeVar("_T", bound=Callable[..., Any]) _ARGPARSE_CLS = Union[Type["pl.LightningDataModule"], Type["pl.Trainer"]] @@ -46,7 +46,7 @@ def from_argparse_args( Examples: - >>> from pytorch_lightning import Trainer + >>> from lightning.pytorch import Trainer >>> parser = ArgumentParser(add_help=False) >>> parser = Trainer.add_argparse_args(parser) >>> parser.add_argument('--my_custom_arg', default='something') # doctest: +SKIP @@ -96,7 +96,7 @@ def parse_env_variables(cls: _ARGPARSE_CLS, template: str = "PL_%(cls_name)s_%(c Examples: - >>> from pytorch_lightning import Trainer + >>> from lightning.pytorch import Trainer >>> parse_env_variables(Trainer) Namespace() >>> import os @@ -130,7 +130,7 @@ def get_init_arguments_and_types(cls: _ARGPARSE_CLS) -> List[Tuple[str, Tuple, A Examples: - >>> from pytorch_lightning import Trainer + >>> from lightning.pytorch import Trainer >>> args = get_init_arguments_and_types(Trainer) """ @@ -159,7 +159,7 @@ def get_init_arguments_and_types(cls: _ARGPARSE_CLS) -> List[Tuple[str, Tuple, A def _get_abbrev_qualified_cls_name(cls: _ARGPARSE_CLS) -> str: assert isinstance(cls, type), repr(cls) - if cls.__module__.startswith("pytorch_lightning."): + if cls.__module__.startswith("lightning.pytorch."): # Abbreviate. return f"pl.{cls.__name__}" # Fully qualified. @@ -199,14 +199,14 @@ def add_argparse_args( >>> # Option 1: Default usage. >>> import argparse - >>> from pytorch_lightning import Trainer + >>> from lightning.pytorch import Trainer >>> parser = argparse.ArgumentParser() >>> parser = Trainer.add_argparse_args(parser) >>> args = parser.parse_args([]) >>> # Option 2: Disable use_argument_group (old behavior). 
>>> import argparse - >>> from pytorch_lightning import Trainer + >>> from lightning.pytorch import Trainer >>> parser = argparse.ArgumentParser() >>> parser = Trainer.add_argparse_args(parser, use_argument_group=False) >>> args = parser.parse_args([]) diff --git a/src/pytorch_lightning/utilities/data.py b/src/lightning/pytorch/utilities/data.py similarity index 96% rename from src/pytorch_lightning/utilities/data.py rename to src/lightning/pytorch/utilities/data.py index 7e17c35535..9ae5cf1503 100644 --- a/src/pytorch_lightning/utilities/data.py +++ b/src/lightning/pytorch/utilities/data.py @@ -28,15 +28,15 @@ from torch.utils.data import ( SequentialSampler, ) -import pytorch_lightning as pl -from lightning_fabric.utilities.data import _reinstantiate_wrapped_cls, _replace_value_in_saved_args -from lightning_fabric.utilities.data import has_iterable_dataset as new_has_iterable_dataset -from lightning_fabric.utilities.data import has_len as new_has_len -from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper -from pytorch_lightning.trainer.states import RunningStage -from pytorch_lightning.trainer.supporters import CombinedLoader -from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_zero_warn, WarningCache +import lightning.pytorch as pl +from lightning.fabric.utilities.data import _reinstantiate_wrapped_cls, _replace_value_in_saved_args +from lightning.fabric.utilities.data import has_iterable_dataset as new_has_iterable_dataset +from lightning.fabric.utilities.data import has_len as new_has_len +from lightning.pytorch.overrides.distributed import IndexBatchSamplerWrapper +from lightning.pytorch.trainer.states import RunningStage +from lightning.pytorch.trainer.supporters import CombinedLoader +from lightning.pytorch.utilities.exceptions import MisconfigurationException +from lightning.pytorch.utilities.rank_zero import rank_zero_warn, WarningCache # might be supported in later releases, see https://github.com/python/mypy/pull/13297 BType = Union[Tensor, str, Mapping[Any, "BType"], Iterable["BType"]] # type: ignore[misc] @@ -127,7 +127,7 @@ def has_len_all_ranks( except (TypeError, NotImplementedError): has_len = False - # we are checking using lightning_fabric, which doesn't know CombinedLoader + # we are checking using lightning.fabric, which doesn't know CombinedLoader if has_len and new_has_iterable_dataset(dataloader): # type: ignore [arg-type] rank_zero_warn( "Your `IterableDataset` has `__len__` defined." diff --git a/src/pytorch_lightning/utilities/deepspeed.py b/src/lightning/pytorch/utilities/deepspeed.py similarity index 96% rename from src/pytorch_lightning/utilities/deepspeed.py rename to src/lightning/pytorch/utilities/deepspeed.py index 23bdc6f56d..1fac2b3160 100644 --- a/src/pytorch_lightning/utilities/deepspeed.py +++ b/src/lightning/pytorch/utilities/deepspeed.py @@ -19,8 +19,8 @@ import os import torch -from lightning_fabric.utilities.types import _PATH -from pytorch_lightning.strategies.deepspeed import _DEEPSPEED_AVAILABLE +from lightning.fabric.utilities.types import _PATH +from lightning.pytorch.strategies.deepspeed import _DEEPSPEED_AVAILABLE if _DEEPSPEED_AVAILABLE: from deepspeed.utils.zero_to_fp32 import ( @@ -68,7 +68,7 @@ def convert_zero_checkpoint_to_fp32_state_dict( Examples: - >>> from pytorch_lightning.utilities.deepspeed import ( + >>> from lightning.pytorch.utilities.deepspeed import ( ... convert_zero_checkpoint_to_fp32_state_dict ... 
) >>> # Lightning deepspeed has saved a directory instead of a file diff --git a/src/pytorch_lightning/utilities/distributed.py b/src/lightning/pytorch/utilities/distributed.py similarity index 97% rename from src/pytorch_lightning/utilities/distributed.py rename to src/lightning/pytorch/utilities/distributed.py index c50a0faee5..b16393273e 100644 --- a/src/pytorch_lightning/utilities/distributed.py +++ b/src/lightning/pytorch/utilities/distributed.py @@ -17,8 +17,8 @@ from typing import Any, Callable, Dict, Optional import torch from torch.nn.parallel.distributed import DistributedDataParallel -from lightning_fabric.utilities.distributed import _distributed_available as new_distributed_available -from pytorch_lightning.utilities.rank_zero import rank_zero_debug, rank_zero_info +from lightning.fabric.utilities.distributed import _distributed_available as new_distributed_available +from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_info def register_ddp_comm_hook( diff --git a/src/pytorch_lightning/utilities/enums.py b/src/lightning/pytorch/utilities/enums.py similarity index 96% rename from src/pytorch_lightning/utilities/enums.py rename to src/lightning/pytorch/utilities/enums.py index bf00ddb8c8..288a4bd7bd 100644 --- a/src/pytorch_lightning/utilities/enums.py +++ b/src/lightning/pytorch/utilities/enums.py @@ -14,7 +14,7 @@ """Enumerated utilities.""" from __future__ import annotations -from lightning_fabric.utilities.enums import LightningEnum +from lightning.fabric.utilities.enums import LightningEnum class GradClipAlgorithmType(LightningEnum): diff --git a/src/pytorch_lightning/utilities/exceptions.py b/src/lightning/pytorch/utilities/exceptions.py similarity index 84% rename from src/pytorch_lightning/utilities/exceptions.py rename to src/lightning/pytorch/utilities/exceptions.py index 687bde1d7e..7900f0a903 100644 --- a/src/pytorch_lightning/utilities/exceptions.py +++ b/src/lightning/pytorch/utilities/exceptions.py @@ -11,16 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from lightning_fabric.utilities.exceptions import MisconfigurationException # noqa: F401 +from lightning.fabric.utilities.exceptions import MisconfigurationException # noqa: F401 class SIGTERMException(SystemExit): """Exception used when a :class:`signal.SIGTERM` is sent to a process. This exception is raised by the loops at specific points. It can be used to write custom logic in the - :meth:`pytorch_lightning.callbacks.callback.Callback.on_exception` method. + :meth:`lightning.pytorch.callbacks.callback.Callback.on_exception` method. - For example, you could use the :class:`pytorch_lightning.callbacks.fault_tolerance.OnExceptionCheckpoint` callback + For example, you could use the :class:`lightning.pytorch.callbacks.fault_tolerance.OnExceptionCheckpoint` callback that saves a checkpoint for you when this exception is raised. 
""" diff --git a/src/pytorch_lightning/utilities/fetching.py b/src/lightning/pytorch/utilities/fetching.py similarity index 98% rename from src/pytorch_lightning/utilities/fetching.py rename to src/lightning/pytorch/utilities/fetching.py index 1347e70c83..c686c3ebde 100644 --- a/src/pytorch_lightning/utilities/fetching.py +++ b/src/lightning/pytorch/utilities/fetching.py @@ -17,9 +17,9 @@ from typing import Any, Callable, Iterable, Iterator, List, Optional, Sized, Tup from torch.utils.data.dataloader import DataLoader -from lightning_fabric.utilities.data import has_len -from pytorch_lightning.trainer.supporters import CombinedLoader -from pytorch_lightning.utilities.exceptions import MisconfigurationException +from lightning.fabric.utilities.data import has_len +from lightning.pytorch.trainer.supporters import CombinedLoader +from lightning.pytorch.utilities.exceptions import MisconfigurationException def _profile_nothing() -> None: diff --git a/src/pytorch_lightning/utilities/finite_checks.py b/src/lightning/pytorch/utilities/finite_checks.py similarity index 100% rename from src/pytorch_lightning/utilities/finite_checks.py rename to src/lightning/pytorch/utilities/finite_checks.py diff --git a/src/pytorch_lightning/utilities/grads.py b/src/lightning/pytorch/utilities/grads.py similarity index 100% rename from src/pytorch_lightning/utilities/grads.py rename to src/lightning/pytorch/utilities/grads.py diff --git a/src/pytorch_lightning/utilities/imports.py b/src/lightning/pytorch/utilities/imports.py similarity index 100% rename from src/pytorch_lightning/utilities/imports.py rename to src/lightning/pytorch/utilities/imports.py diff --git a/src/pytorch_lightning/utilities/logger.py b/src/lightning/pytorch/utilities/logger.py similarity index 97% rename from src/pytorch_lightning/utilities/logger.py rename to src/lightning/pytorch/utilities/logger.py index 36f718a628..b267a1daad 100644 --- a/src/pytorch_lightning/utilities/logger.py +++ b/src/lightning/pytorch/utilities/logger.py @@ -16,7 +16,7 @@ from pathlib import Path from typing import Any, List, Tuple, Union -from pytorch_lightning.callbacks import Checkpoint +from lightning.pytorch.callbacks import Checkpoint def _version(loggers: List[Any], separator: str = "_") -> Union[int, str]: diff --git a/src/pytorch_lightning/utilities/memory.py b/src/lightning/pytorch/utilities/memory.py similarity index 100% rename from src/pytorch_lightning/utilities/memory.py rename to src/lightning/pytorch/utilities/memory.py diff --git a/src/pytorch_lightning/utilities/meta.py b/src/lightning/pytorch/utilities/meta.py similarity index 100% rename from src/pytorch_lightning/utilities/meta.py rename to src/lightning/pytorch/utilities/meta.py diff --git a/src/pytorch_lightning/utilities/metrics.py b/src/lightning/pytorch/utilities/metrics.py similarity index 94% rename from src/pytorch_lightning/utilities/metrics.py rename to src/lightning/pytorch/utilities/metrics.py index f6147ecfd8..d509932f46 100644 --- a/src/pytorch_lightning/utilities/metrics.py +++ b/src/lightning/pytorch/utilities/metrics.py @@ -15,7 +15,7 @@ from typing import Any -from lightning_fabric.utilities.apply_func import convert_tensors_to_scalars +from lightning.fabric.utilities.apply_func import convert_tensors_to_scalars def metrics_to_scalars(metrics: Any) -> Any: diff --git a/src/pytorch_lightning/utilities/migration/__init__.py b/src/lightning/pytorch/utilities/migration/__init__.py similarity index 83% rename from src/pytorch_lightning/utilities/migration/__init__.py rename to 
src/lightning/pytorch/utilities/migration/__init__.py index d107ac2bce..66342ecaad 100644 --- a/src/pytorch_lightning/utilities/migration/__init__.py +++ b/src/lightning/pytorch/utilities/migration/__init__.py @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -from pytorch_lightning.utilities.migration.utils import migrate_checkpoint # noqa: F401 -from pytorch_lightning.utilities.migration.utils import pl_legacy_patch # noqa: F401 +from lightning.pytorch.utilities.migration.utils import migrate_checkpoint # noqa: F401 +from lightning.pytorch.utilities.migration.utils import pl_legacy_patch # noqa: F401 diff --git a/src/pytorch_lightning/utilities/migration/migration.py b/src/lightning/pytorch/utilities/migration/migration.py similarity index 96% rename from src/pytorch_lightning/utilities/migration/migration.py rename to src/lightning/pytorch/utilities/migration/migration.py index 0a27af2ae0..0b43b2d21f 100644 --- a/src/pytorch_lightning/utilities/migration/migration.py +++ b/src/lightning/pytorch/utilities/migration/migration.py @@ -14,7 +14,7 @@ """Contains migration functions to upgrade legacy checkpoints to the format of the current Lightning version. When Lightning loads a checkpoint, these migrations will be applied on the loaded checkpoint dictionary sequentially, -see :func:`~pytorch_lightning.utilities.migration.utils.migrate_checkpoint`. +see :func:`~lightning.pytorch.utilities.migration.utils.migrate_checkpoint`. For the Lightning developer: How to add a new migration? @@ -26,15 +26,15 @@ For the Lightning developer: How to add a new migration? 3. You can test the migration on a checkpoint (backup your files first) by running: cp model.ckpt model.ckpt.backup - python -m pytorch_lightning.utilities.upgrade_checkpoint --file model.ckpt + python -m lightning.pytorch.utilities.upgrade_checkpoint --file model.ckpt """ import re from typing import Any, Callable, Dict, List -from lightning_fabric.utilities.warnings import PossibleUserWarning -from pytorch_lightning.callbacks.early_stopping import EarlyStopping -from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint -from pytorch_lightning.utilities.rank_zero import rank_zero_warn +from lightning.fabric.utilities.warnings import PossibleUserWarning +from lightning.pytorch.callbacks.early_stopping import EarlyStopping +from lightning.pytorch.callbacks.model_checkpoint import ModelCheckpoint +from lightning.pytorch.utilities.rank_zero import rank_zero_warn _CHECKPOINT = Dict[str, Any] diff --git a/src/pytorch_lightning/utilities/migration/utils.py b/src/lightning/pytorch/utilities/migration/utils.py similarity index 89% rename from src/pytorch_lightning/utilities/migration/utils.py rename to src/lightning/pytorch/utilities/migration/utils.py index fe2857415d..114e94d43e 100644 --- a/src/pytorch_lightning/utilities/migration/utils.py +++ b/src/lightning/pytorch/utilities/migration/utils.py @@ -19,12 +19,12 @@ from typing import Any, Dict, List, Optional, Tuple, Type from packaging.version import Version -import pytorch_lightning as pl -from lightning_fabric.utilities.imports import _IS_WINDOWS -from lightning_fabric.utilities.types import _PATH -from lightning_fabric.utilities.warnings import PossibleUserWarning -from pytorch_lightning.utilities.migration.migration import _migration_index -from pytorch_lightning.utilities.rank_zero import rank_zero_warn +import lightning.pytorch as pl +from lightning.fabric.utilities.imports import _IS_WINDOWS +from 
lightning.fabric.utilities.types import _PATH +from lightning.fabric.utilities.warnings import PossibleUserWarning +from lightning.pytorch.utilities.migration.migration import _migration_index +from lightning.pytorch.utilities.rank_zero import rank_zero_warn _log = logging.getLogger(__name__) _CHECKPOINT = Dict[str, Any] @@ -73,9 +73,9 @@ class pl_legacy_patch: """Registers legacy artifacts (classes, methods, etc.) that were removed but still need to be included for unpickling old checkpoints. The following patches apply. - 1. ``pytorch_lightning.utilities.argparse._gpus_arg_default``: Applies to all checkpoints saved prior to + 1. ``lightning.pytorch.utilities.argparse._gpus_arg_default``: Applies to all checkpoints saved prior to version 1.2.8. See: https://github.com/PyTorchLightning/pytorch-lightning/pull/6898 - 2. ``pytorch_lightning.utilities.argparse_utils``: A module that was deprecated in 1.2 and removed in 1.4, + 2. ``lightning.pytorch.utilities.argparse_utils``: A module that was deprecated in 1.2 and removed in 1.4, but still needs to be available for import for legacy checkpoints. Example: @@ -86,8 +86,8 @@ class pl_legacy_patch: def __enter__(self) -> "pl_legacy_patch": # `pl.utilities.argparse_utils` was renamed to `pl.utilities.argparse` - legacy_argparse_module = ModuleType("pytorch_lightning.utilities.argparse_utils") - sys.modules["pytorch_lightning.utilities.argparse_utils"] = legacy_argparse_module + legacy_argparse_module = ModuleType("lightning.pytorch.utilities.argparse_utils") + sys.modules["lightning.pytorch.utilities.argparse_utils"] = legacy_argparse_module # `_gpus_arg_default` used to be imported from these locations legacy_argparse_module._gpus_arg_default = lambda x: x @@ -102,7 +102,7 @@ class pl_legacy_patch: ) -> None: if hasattr(pl.utilities.argparse, "_gpus_arg_default"): delattr(pl.utilities.argparse, "_gpus_arg_default") - del sys.modules["pytorch_lightning.utilities.argparse_utils"] + del sys.modules["lightning.pytorch.utilities.argparse_utils"] def _pl_migrate_checkpoint(checkpoint: _CHECKPOINT, checkpoint_path: Optional[_PATH] = None) -> _CHECKPOINT: @@ -126,7 +126,7 @@ def _pl_migrate_checkpoint(checkpoint: _CHECKPOINT, checkpoint_path: Optional[_P _log.info( f"Lightning automatically upgraded your loaded checkpoint from v{old_version} to v{new_version}." 
" To apply the upgrade to your files permanently, run" - f" `python -m pytorch_lightning.utilities.upgrade_checkpoint --file {str(path_hint)}`" + f" `python -m lightning.pytorch.utilities.upgrade_checkpoint --file {str(path_hint)}`" ) return checkpoint diff --git a/src/pytorch_lightning/utilities/model_helpers.py b/src/lightning/pytorch/utilities/model_helpers.py similarity index 95% rename from src/pytorch_lightning/utilities/model_helpers.py rename to src/lightning/pytorch/utilities/model_helpers.py index 6d511307ea..1f1936b610 100644 --- a/src/pytorch_lightning/utilities/model_helpers.py +++ b/src/lightning/pytorch/utilities/model_helpers.py @@ -16,7 +16,7 @@ from typing import Any, Optional, Type from lightning_utilities.core.imports import RequirementCache from torch import nn -import pytorch_lightning as pl +import lightning.pytorch as pl def is_overridden(method_name: str, instance: Optional[object] = None, parent: Optional[Type[object]] = None) -> bool: @@ -38,7 +38,7 @@ def is_overridden(method_name: str, instance: Optional[object] = None, parent: O def get_torchvision_model(model_name: str, **kwargs: Any) -> nn.Module: - from pytorch_lightning.utilities.imports import _TORCHVISION_AVAILABLE + from lightning.pytorch.utilities.imports import _TORCHVISION_AVAILABLE if not _TORCHVISION_AVAILABLE: raise ModuleNotFoundError(str(_TORCHVISION_AVAILABLE)) diff --git a/src/pytorch_lightning/utilities/model_summary/__init__.py b/src/lightning/pytorch/utilities/model_summary/__init__.py similarity index 85% rename from src/pytorch_lightning/utilities/model_summary/__init__.py rename to src/lightning/pytorch/utilities/model_summary/__init__.py index e5c9439933..af8a7d38dc 100644 --- a/src/pytorch_lightning/utilities/model_summary/__init__.py +++ b/src/lightning/pytorch/utilities/model_summary/__init__.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from pytorch_lightning.utilities.model_summary.model_summary import ( # noqa: F401 +from lightning.pytorch.utilities.model_summary.model_summary import ( # noqa: F401 get_formatted_model_size, get_human_readable_count, LayerSummary, @@ -19,4 +19,4 @@ from pytorch_lightning.utilities.model_summary.model_summary import ( # noqa: F parse_batch_shape, summarize, ) -from pytorch_lightning.utilities.model_summary.model_summary_deepspeed import DeepSpeedSummary # noqa: F401 +from lightning.pytorch.utilities.model_summary.model_summary_deepspeed import DeepSpeedSummary # noqa: F401 diff --git a/src/pytorch_lightning/utilities/model_summary/model_summary.py b/src/lightning/pytorch/utilities/model_summary/model_summary.py similarity index 97% rename from src/pytorch_lightning/utilities/model_summary/model_summary.py rename to src/lightning/pytorch/utilities/model_summary/model_summary.py index ff10172455..d4341a9950 100644 --- a/src/pytorch_lightning/utilities/model_summary/model_summary.py +++ b/src/lightning/pytorch/utilities/model_summary/model_summary.py @@ -23,8 +23,8 @@ import torch.nn as nn from torch import Tensor from torch.utils.hooks import RemovableHandle -import pytorch_lightning as pl -from pytorch_lightning.utilities.rank_zero import WarningCache +import lightning.pytorch as pl +from lightning.pytorch.utilities.rank_zero import WarningCache log = logging.getLogger(__name__) warning_cache = WarningCache() @@ -34,7 +34,7 @@ UNKNOWN_SIZE = "?" 
class LayerSummary: - """Summary class for a single layer in a :class:`~pytorch_lightning.core.module.LightningModule`. It collects + """Summary class for a single layer in a :class:`~lightning.pytorch.core.module.LightningModule`. It collects the following information: - Type of the layer (e.g. Linear, BatchNorm1d, ...) @@ -126,7 +126,7 @@ class LayerSummary: class ModelSummary: - """Generates a summary of all layers in a :class:`~pytorch_lightning.core.module.LightningModule`. + """Generates a summary of all layers in a :class:`~lightning.pytorch.core.module.LightningModule`. Args: model: The model to summarize (also referred to as the root module). @@ -145,7 +145,7 @@ class ModelSummary: Example:: - >>> import pytorch_lightning as pl + >>> import lightning.pytorch as pl >>> class LitModel(pl.LightningModule): ... ... def __init__(self): diff --git a/src/pytorch_lightning/utilities/model_summary/model_summary_deepspeed.py b/src/lightning/pytorch/utilities/model_summary/model_summary_deepspeed.py similarity index 98% rename from src/pytorch_lightning/utilities/model_summary/model_summary_deepspeed.py rename to src/lightning/pytorch/utilities/model_summary/model_summary_deepspeed.py index d5d14d6bd7..aac2c91ea4 100644 --- a/src/pytorch_lightning/utilities/model_summary/model_summary_deepspeed.py +++ b/src/lightning/pytorch/utilities/model_summary/model_summary_deepspeed.py @@ -20,7 +20,7 @@ import torch from lightning_utilities.core.imports import RequirementCache from torch.nn import Parameter -from pytorch_lightning.utilities.model_summary.model_summary import ( +from lightning.pytorch.utilities.model_summary.model_summary import ( _is_lazy_weight_tensor, get_human_readable_count, LayerSummary, diff --git a/src/pytorch_lightning/utilities/parameter_tying.py b/src/lightning/pytorch/utilities/parameter_tying.py similarity index 100% rename from src/pytorch_lightning/utilities/parameter_tying.py rename to src/lightning/pytorch/utilities/parameter_tying.py diff --git a/src/pytorch_lightning/utilities/parsing.py b/src/lightning/pytorch/utilities/parsing.py similarity index 98% rename from src/pytorch_lightning/utilities/parsing.py rename to src/lightning/pytorch/utilities/parsing.py index b68947dc5c..493590bd32 100644 --- a/src/pytorch_lightning/utilities/parsing.py +++ b/src/lightning/pytorch/utilities/parsing.py @@ -22,8 +22,8 @@ from typing import Any, Dict, List, Literal, MutableMapping, Optional, Sequence, from torch import nn -import pytorch_lightning as pl -from pytorch_lightning.utilities.rank_zero import rank_zero_warn +import lightning.pytorch as pl +from lightning.pytorch.utilities.rank_zero import rank_zero_warn def str_to_bool_or_str(val: str) -> Union[str, bool]: @@ -205,7 +205,7 @@ def flatten_dict(source: Dict[str, Any], result: Optional[Dict[str, Any]] = None def save_hyperparameters( obj: Any, *args: Any, ignore: Optional[Union[Sequence[str], str]] = None, frame: Optional[types.FrameType] = None ) -> None: - """See :meth:`~pytorch_lightning.LightningModule.save_hyperparameters`""" + """See :meth:`~lightning.pytorch.LightningModule.save_hyperparameters`""" if len(args) == 1 and not isinstance(args, str) and not args[0]: # args[0] is an empty container @@ -224,7 +224,7 @@ def save_hyperparameters( else: init_args = {} - from pytorch_lightning.core.mixins import HyperparametersMixin + from lightning.pytorch.core.mixins import HyperparametersMixin for local_args in collect_init_args(frame, [], classes=(HyperparametersMixin,)): init_args.update(local_args) diff --git 
a/src/pytorch_lightning/utilities/rank_zero.py b/src/lightning/pytorch/utilities/rank_zero.py similarity index 88% rename from src/pytorch_lightning/utilities/rank_zero.py rename to src/lightning/pytorch/utilities/rank_zero.py index ef4a3bc6a0..a4a320cf96 100644 --- a/src/pytorch_lightning/utilities/rank_zero.py +++ b/src/lightning/pytorch/utilities/rank_zero.py @@ -15,8 +15,8 @@ import logging # note: we want to keep these indirections so the `rank_zero_module.log` is set (on import) for PL users -from lightning_fabric.utilities.rank_zero import LightningDeprecationWarning # noqa: F401 -from lightning_fabric.utilities.rank_zero import ( # noqa: F401 +from lightning.fabric.utilities.rank_zero import LightningDeprecationWarning # noqa: F401 +from lightning.fabric.utilities.rank_zero import ( # noqa: F401 rank_prefixed_message, rank_zero_debug, rank_zero_deprecation, diff --git a/src/pytorch_lightning/utilities/seed.py b/src/lightning/pytorch/utilities/seed.py similarity index 96% rename from src/pytorch_lightning/utilities/seed.py rename to src/lightning/pytorch/utilities/seed.py index 85d0fba048..edb2f8cd9e 100644 --- a/src/pytorch_lightning/utilities/seed.py +++ b/src/lightning/pytorch/utilities/seed.py @@ -15,7 +15,7 @@ from contextlib import contextmanager from typing import Generator -from lightning_fabric.utilities.seed import _collect_rng_states, _set_rng_states +from lightning.fabric.utilities.seed import _collect_rng_states, _set_rng_states @contextmanager diff --git a/src/pytorch_lightning/utilities/signature_utils.py b/src/lightning/pytorch/utilities/signature_utils.py similarity index 100% rename from src/pytorch_lightning/utilities/signature_utils.py rename to src/lightning/pytorch/utilities/signature_utils.py diff --git a/src/pytorch_lightning/utilities/types.py b/src/lightning/pytorch/utilities/types.py similarity index 98% rename from src/pytorch_lightning/utilities/types.py rename to src/lightning/pytorch/utilities/types.py index 1e9ea753d7..c051e5f165 100644 --- a/src/pytorch_lightning/utilities/types.py +++ b/src/lightning/pytorch/utilities/types.py @@ -26,7 +26,7 @@ from torch import Tensor from torch.utils.data import DataLoader from torchmetrics import Metric -from lightning_fabric.utilities.types import _TORCH_LRSCHEDULER, LRScheduler, ProcessGroup, ReduceLROnPlateau +from lightning.fabric.utilities.types import _TORCH_LRSCHEDULER, LRScheduler, ProcessGroup, ReduceLROnPlateau _NUMBER = Union[int, float] _METRIC = Union[Metric, Tensor, _NUMBER] diff --git a/src/pytorch_lightning/utilities/upgrade_checkpoint.py b/src/lightning/pytorch/utilities/upgrade_checkpoint.py similarity index 97% rename from src/pytorch_lightning/utilities/upgrade_checkpoint.py rename to src/lightning/pytorch/utilities/upgrade_checkpoint.py index 7cad2f9c50..847f9fb953 100644 --- a/src/pytorch_lightning/utilities/upgrade_checkpoint.py +++ b/src/lightning/pytorch/utilities/upgrade_checkpoint.py @@ -21,7 +21,7 @@ from typing import List import torch from tqdm import tqdm -from pytorch_lightning.utilities.migration import migrate_checkpoint, pl_legacy_patch +from lightning.pytorch.utilities.migration import migrate_checkpoint, pl_legacy_patch _log = logging.getLogger(__name__) diff --git a/src/pytorch_lightning/utilities/warnings.py b/src/lightning/pytorch/utilities/warnings.py similarity index 91% rename from src/pytorch_lightning/utilities/warnings.py rename to src/lightning/pytorch/utilities/warnings.py index d495cf8322..795b28c738 100644 --- a/src/pytorch_lightning/utilities/warnings.py +++ 
b/src/lightning/pytorch/utilities/warnings.py @@ -13,4 +13,4 @@ # limitations under the License. """Warning-related utilities.""" # backwards compatibility -from lightning_fabric.utilities.warnings import PossibleUserWarning # noqa: F401 +from lightning.fabric.utilities.warnings import PossibleUserWarning # noqa: F401 diff --git a/src/pytorch_lightning/plugins/__init__.py b/src/pytorch_lightning/plugins/__init__.py deleted file mode 100644 index c14a7256b1..0000000000 --- a/src/pytorch_lightning/plugins/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -from typing import Union - -from lightning_fabric.plugins import CheckpointIO, ClusterEnvironment, TorchCheckpointIO, XLACheckpointIO -from pytorch_lightning.plugins.io.async_plugin import AsyncCheckpointIO -from pytorch_lightning.plugins.io.hpu_plugin import HPUCheckpointIO -from pytorch_lightning.plugins.layer_sync import LayerSync, TorchSyncBatchNorm -from pytorch_lightning.plugins.precision.amp import MixedPrecisionPlugin -from pytorch_lightning.plugins.precision.colossalai import ColossalAIPrecisionPlugin -from pytorch_lightning.plugins.precision.deepspeed import DeepSpeedPrecisionPlugin -from pytorch_lightning.plugins.precision.double import DoublePrecisionPlugin -from pytorch_lightning.plugins.precision.fsdp import FSDPMixedPrecisionPlugin -from pytorch_lightning.plugins.precision.hpu import HPUPrecisionPlugin -from pytorch_lightning.plugins.precision.ipu import IPUPrecisionPlugin -from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin -from pytorch_lightning.plugins.precision.tpu import TPUPrecisionPlugin -from pytorch_lightning.plugins.precision.tpu_bf16 import TPUBf16PrecisionPlugin - -PLUGIN = Union[PrecisionPlugin, ClusterEnvironment, CheckpointIO, LayerSync] -PLUGIN_INPUT = Union[PLUGIN, str] - -__all__ = [ - "AsyncCheckpointIO", - "CheckpointIO", - "TorchCheckpointIO", - "XLACheckpointIO", - "HPUCheckpointIO", - "ColossalAIPrecisionPlugin", - "DeepSpeedPrecisionPlugin", - "DoublePrecisionPlugin", - "IPUPrecisionPlugin", - "HPUPrecisionPlugin", - "MixedPrecisionPlugin", - "PrecisionPlugin", - "FSDPMixedPrecisionPlugin", - "TPUPrecisionPlugin", - "TPUBf16PrecisionPlugin", - "LayerSync", - "TorchSyncBatchNorm", -] diff --git a/src/pytorch_lightning/serve/__init__.py b/src/pytorch_lightning/serve/__init__.py deleted file mode 100644 index fb398f918e..0000000000 --- a/src/pytorch_lightning/serve/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from pytorch_lightning.serve.servable_module import ServableModule -from pytorch_lightning.serve.servable_module_validator import ServableModuleValidator - -__all__ = ["ServableModuleValidator", "ServableModule"] diff --git a/src/pytorch_lightning/strategies/__init__.py b/src/pytorch_lightning/strategies/__init__.py deleted file mode 100644 index fec63c32da..0000000000 --- a/src/pytorch_lightning/strategies/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright The Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from lightning_fabric.strategies.registry import _StrategyRegistry -from pytorch_lightning.strategies.bagua import BaguaStrategy # noqa: F401 -from pytorch_lightning.strategies.colossalai import ColossalAIStrategy # noqa: F401 -from pytorch_lightning.strategies.ddp import DDPStrategy # noqa: F401 -from pytorch_lightning.strategies.ddp_spawn import DDPSpawnStrategy # noqa: F401 -from pytorch_lightning.strategies.deepspeed import DeepSpeedStrategy # noqa: F401 -from pytorch_lightning.strategies.dp import DataParallelStrategy # noqa: F401 -from pytorch_lightning.strategies.fsdp import FSDPStrategy # noqa: F401 -from pytorch_lightning.strategies.hpu_parallel import HPUParallelStrategy # noqa: F401 -from pytorch_lightning.strategies.ipu import IPUStrategy # noqa: F401 -from pytorch_lightning.strategies.parallel import ParallelStrategy # noqa: F401 -from pytorch_lightning.strategies.single_device import SingleDeviceStrategy # noqa: F401 -from pytorch_lightning.strategies.single_hpu import SingleHPUStrategy # noqa: F401 -from pytorch_lightning.strategies.single_tpu import SingleTPUStrategy # noqa: F401 -from pytorch_lightning.strategies.strategy import Strategy # noqa: F401 -from pytorch_lightning.strategies.tpu_spawn import TPUSpawnStrategy # noqa: F401 -from pytorch_lightning.strategies.utils import _call_register_strategies - -_STRATEGIES_BASE_MODULE = "pytorch_lightning.strategies" -StrategyRegistry = _StrategyRegistry() -_call_register_strategies(StrategyRegistry, _STRATEGIES_BASE_MODULE) diff --git a/src/pytorch_lightning/trainer/connectors/logger_connector/__init__.py b/src/pytorch_lightning/trainer/connectors/logger_connector/__init__.py deleted file mode 100644 index f14e20f232..0000000000 --- a/src/pytorch_lightning/trainer/connectors/logger_connector/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from pytorch_lightning.trainer.connectors.logger_connector.logger_connector import LoggerConnector # noqa: F401
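The hunks in this part of the patch are a mechanical path rename: every ``pytorch_lightning.*`` import becomes ``lightning.pytorch.*`` and every ``lightning_fabric.*`` import becomes ``lightning.fabric.*``, while the standalone ``src/pytorch_lightning`` copies of ``plugins/__init__.py``, ``serve/__init__.py``, ``strategies/__init__.py``, and ``trainer/connectors/logger_connector/__init__.py`` are deleted because equivalent files now live under ``src/lightning/pytorch/``. As a rough illustration of what the move means for downstream user code, here is a minimal sketch (not part of this patch) assuming the unified ``lightning`` package produced by this change is installed; the ``seed_everything`` import and the toy ``Trainer`` arguments are illustrative choices, not taken from the diff:

    # Sketch only: how imports in user code shift after this move.
    # Old (standalone packages):
    #     from pytorch_lightning import Trainer
    #     from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
    #     from lightning_fabric.utilities.seed import seed_everything
    # New (unified package layout introduced by this patch):
    from lightning.pytorch import Trainer
    from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint
    from lightning.fabric.utilities.seed import seed_everything

    seed_everything(42)  # illustrative value, not taken from the patch
    trainer = Trainer(
        max_epochs=1,
        callbacks=[EarlyStopping(monitor="val_loss"), ModelCheckpoint(monitor="val_loss")],
    )

Only the module paths change; class names, signatures, and behavior are carried over unchanged by the rename.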