[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
This commit is contained in:
pre-commit-ci[bot] 2024-07-08 11:05:18 +00:00 committed by Luca Antiga
parent cc457fe5eb
commit cf348673ed
5 changed files with 18 additions and 22 deletions

View File

@ -6,16 +6,16 @@ Brief description of all our automation tools used for boosting development perf
## Unit and Integration Testing ## Unit and Integration Testing
| workflow file | action | accelerator | | workflow file | action | accelerator |
| -------------------------------------- | ----------------------------------------------------------------------------------------- | ----------- | | -------------------------------------- | -------------------------------------------------------------------------- | ----------- |
| .github/workflows/ci-tests-fabric.yml | Run all tests except for accelerator-specific and standalone. | CPU | | .github/workflows/ci-tests-fabric.yml | Run all tests except for accelerator-specific and standalone. | CPU |
| .github/workflows/ci-tests-pytorch.yml | Run all tests except for accelerator-specific and standalone. | CPU | | .github/workflows/ci-tests-pytorch.yml | Run all tests except for accelerator-specific and standalone. | CPU |
| .github/workflows/ci-tests-data.yml | Run unit and integration tests with data pipelining. | CPU | | .github/workflows/ci-tests-data.yml | Run unit and integration tests with data pipelining. | CPU |
| .azure-pipelines/gpu-tests-fabric.yml | Run only GPU-specific tests, standalone\*, and examples. | GPU | | .azure-pipelines/gpu-tests-fabric.yml | Run only GPU-specific tests, standalone\*, and examples. | GPU |
| .azure-pipelines/gpu-tests-pytorch.yml | Run only GPU-specific tests, standalone\*, and examples. | GPU | | .azure-pipelines/gpu-tests-pytorch.yml | Run only GPU-specific tests, standalone\*, and examples. | GPU |
| .azure-pipelines/gpu-benchmarks.yml | Run speed/memory benchmarks for parity with vanilla PyTorch. | GPU | | .azure-pipelines/gpu-benchmarks.yml | Run speed/memory benchmarks for parity with vanilla PyTorch. | GPU |
| .github/workflows/ci-tests-pytorch.yml | Run all tests except for accelerator-specific, standalone and slow tests. | CPU | | .github/workflows/ci-tests-pytorch.yml | Run all tests except for accelerator-specific, standalone and slow tests. | CPU |
| .github/workflows/tpu-tests.yml | Run only TPU-specific tests. Requires that the PR title contains '\[TPU\]' | TPU | | .github/workflows/tpu-tests.yml | Run only TPU-specific tests. Requires that the PR title contains '\[TPU\]' | TPU |
\* Each standalone test needs to be run in separate processes to avoid unwanted interactions between test cases. \* Each standalone test needs to be run in separate processes to avoid unwanted interactions between test cases.

View File

@ -26,7 +26,6 @@ defaults:
shell: bash shell: bash
jobs: jobs:
import-pkg: import-pkg:
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
@ -50,4 +49,3 @@ jobs:
- name: Try importing - name: Try importing
run: from lightning.${{ matrix.pkg-name }} import * run: from lightning.${{ matrix.pkg-name }} import *
shell: python shell: python

View File

@ -1,14 +1,13 @@
import lightning as L import lightning as L
import torch import torch
import torch.nn.functional as F import torch.nn.functional as F
from data import RandomTokenDataset
from lightning.fabric.strategies import ModelParallelStrategy from lightning.fabric.strategies import ModelParallelStrategy
from model import ModelArgs, Transformer from model import ModelArgs, Transformer
from parallelism import parallelize from parallelism import parallelize
from torch.distributed.tensor.parallel import loss_parallel from torch.distributed.tensor.parallel import loss_parallel
from torch.utils.data import DataLoader from torch.utils.data import DataLoader
from data import RandomTokenDataset
def train(): def train():
strategy = ModelParallelStrategy( strategy = ModelParallelStrategy(

View File

@ -1,14 +1,13 @@
import lightning as L import lightning as L
import torch import torch
import torch.nn.functional as F import torch.nn.functional as F
from data import RandomTokenDataset
from lightning.pytorch.strategies import ModelParallelStrategy from lightning.pytorch.strategies import ModelParallelStrategy
from model import ModelArgs, Transformer from model import ModelArgs, Transformer
from parallelism import parallelize from parallelism import parallelize
from torch.distributed.tensor.parallel import loss_parallel from torch.distributed.tensor.parallel import loss_parallel
from torch.utils.data import DataLoader from torch.utils.data import DataLoader
from data import RandomTokenDataset
class Llama3(L.LightningModule): class Llama3(L.LightningModule):
def __init__(self): def __init__(self):

View File

@ -13,12 +13,12 @@ else:
# Enable resolution at least for lower data namespace # Enable resolution at least for lower data namespace
sys.modules["lightning.app"] = lightning_app sys.modules["lightning.app"] = lightning_app
from lightning_app.core.app import LightningApp # noqa: E402 from lightning_app.core.app import LightningApp
from lightning_app.core.flow import LightningFlow # noqa: E402 from lightning_app.core.flow import LightningFlow
from lightning_app.core.work import LightningWork # noqa: E402 from lightning_app.core.work import LightningWork
from lightning_app.plugin.plugin import LightningPlugin # noqa: E402 from lightning_app.plugin.plugin import LightningPlugin
from lightning_app.utilities.packaging.build_config import BuildConfig # noqa: E402 from lightning_app.utilities.packaging.build_config import BuildConfig
from lightning_app.utilities.packaging.cloud_compute import CloudCompute # noqa: E402 from lightning_app.utilities.packaging.cloud_compute import CloudCompute
if module_available("lightning_app.components.demo"): if module_available("lightning_app.components.demo"):
from lightning.app.components import demo # noqa: F401 from lightning.app.components import demo # noqa: F401