diff --git a/.github/workflows/README.md b/.github/workflows/README.md index 8c4d9aa136..f0d70e2f84 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -6,16 +6,16 @@ Brief description of all our automation tools used for boosting development perf ## Unit and Integration Testing -| workflow file | action | accelerator | -| -------------------------------------- | ----------------------------------------------------------------------------------------- | ----------- | -| .github/workflows/ci-tests-fabric.yml | Run all tests except for accelerator-specific and standalone. | CPU | -| .github/workflows/ci-tests-pytorch.yml | Run all tests except for accelerator-specific and standalone. | CPU | -| .github/workflows/ci-tests-data.yml | Run unit and integration tests with data pipelining. | CPU | -| .azure-pipelines/gpu-tests-fabric.yml | Run only GPU-specific tests, standalone\*, and examples. | GPU | -| .azure-pipelines/gpu-tests-pytorch.yml | Run only GPU-specific tests, standalone\*, and examples. | GPU | -| .azure-pipelines/gpu-benchmarks.yml | Run speed/memory benchmarks for parity with vanila PyTorch. | GPU | -| .github/workflows/ci-tests-pytorch.yml | Run all tests except for accelerator-specific, standalone and slow tests. | CPU | -| .github/workflows/tpu-tests.yml | Run only TPU-specific tests. Requires that the PR title contains '\[TPU\]' | TPU | +| workflow file | action | accelerator | +| -------------------------------------- | -------------------------------------------------------------------------- | ----------- | +| .github/workflows/ci-tests-fabric.yml | Run all tests except for accelerator-specific and standalone. | CPU | +| .github/workflows/ci-tests-pytorch.yml | Run all tests except for accelerator-specific and standalone. | CPU | +| .github/workflows/ci-tests-data.yml | Run unit and integration tests with data pipelining. | CPU | +| .azure-pipelines/gpu-tests-fabric.yml | Run only GPU-specific tests, standalone\*, and examples. | GPU | +| .azure-pipelines/gpu-tests-pytorch.yml | Run only GPU-specific tests, standalone\*, and examples. | GPU | +| .azure-pipelines/gpu-benchmarks.yml | Run speed/memory benchmarks for parity with vanilla PyTorch. | GPU | +| .github/workflows/ci-tests-pytorch.yml | Run all tests except for accelerator-specific, standalone and slow tests. | CPU | +| .github/workflows/tpu-tests.yml | Run only TPU-specific tests. Requires that the PR title contains '\[TPU\]' | TPU | \* Each standalone test needs to be run in separate processes to avoid unwanted interactions between test cases.
diff --git a/.github/workflows/ci-pkg-extend.yml b/.github/workflows/ci-pkg-extend.yml index d9374a0e72..83263d0cd5 100644 --- a/.github/workflows/ci-pkg-extend.yml +++ b/.github/workflows/ci-pkg-extend.yml @@ -26,7 +26,6 @@ defaults: shell: bash jobs: - import-pkg: runs-on: ${{ matrix.os }} strategy: @@ -50,4 +49,3 @@ jobs: - name: Try importing run: from lightning.${{ matrix.pkg-name }} import * shell: python -
diff --git a/examples/fabric/tensor_parallel/train.py b/examples/fabric/tensor_parallel/train.py index 1435e5c200..4a98f12cf6 100644 --- a/examples/fabric/tensor_parallel/train.py +++ b/examples/fabric/tensor_parallel/train.py @@ -1,14 +1,13 @@ import lightning as L import torch import torch.nn.functional as F +from data import RandomTokenDataset from lightning.fabric.strategies import ModelParallelStrategy from model import ModelArgs, Transformer from parallelism import parallelize from torch.distributed.tensor.parallel import loss_parallel from torch.utils.data import DataLoader -from data import RandomTokenDataset - def train(): strategy = ModelParallelStrategy(
diff --git a/examples/pytorch/tensor_parallel/train.py b/examples/pytorch/tensor_parallel/train.py index 37c620f458..6a91e1242e 100644 --- a/examples/pytorch/tensor_parallel/train.py +++ b/examples/pytorch/tensor_parallel/train.py @@ -1,14 +1,13 @@ import lightning as L import torch import torch.nn.functional as F +from data import RandomTokenDataset from lightning.pytorch.strategies import ModelParallelStrategy from model import ModelArgs, Transformer from parallelism import parallelize from torch.distributed.tensor.parallel import loss_parallel from torch.utils.data import DataLoader -from data import RandomTokenDataset - class Llama3(L.LightningModule): def __init__(self):
diff --git a/src/lightning/app/__init__.py b/src/lightning/app/__init__.py index 0d85d9ee33..883566cd6f 100644 --- a/src/lightning/app/__init__.py +++ b/src/lightning/app/__init__.py @@ -13,12 +13,12 @@ else: # Enable resolution at least for lower data namespace sys.modules["lightning.app"] = lightning_app - from lightning_app.core.app import LightningApp # noqa: E402 - from lightning_app.core.flow import LightningFlow # noqa: E402 - from lightning_app.core.work import LightningWork # noqa: E402 - from lightning_app.plugin.plugin import LightningPlugin # noqa: E402 - from lightning_app.utilities.packaging.build_config import BuildConfig # noqa: E402 - from lightning_app.utilities.packaging.cloud_compute import CloudCompute # noqa: E402 + from lightning_app.core.app import LightningApp + from lightning_app.core.flow import LightningFlow + from lightning_app.core.work import LightningWork + from lightning_app.plugin.plugin import LightningPlugin + from lightning_app.utilities.packaging.build_config import BuildConfig + from lightning_app.utilities.packaging.cloud_compute import CloudCompute if module_available("lightning_app.components.demo"): from lightning.app.components import demo # noqa: F401