precommit: drop Black in favor of Ruff (#19380)

Jirka Borovec 2024-01-31 18:09:39 +01:00 committed by GitHub
parent 01f8531c9d
commit 6421dd8d4f
4 changed files with 12 additions and 20 deletions

.pre-commit-config.yaml

@@ -84,25 +84,11 @@ repos:
           - flake8-return
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.1.9"
+    rev: "v0.1.15"
     hooks:
       - id: ruff
+        args: ["--fix", "--preview"]
-  - repo: https://github.com/psf/black
-    rev: 23.12.1
-    hooks:
-      - id: black
-        name: Format code
-        exclude: docs/source-app
-  - repo: https://github.com/asottile/blacken-docs
-    rev: 1.16.0
-    hooks:
-      - id: blacken-docs
-        args: ["--line-length=120"]
-        exclude: docs/source-app
   - repo: https://github.com/executablebooks/mdformat
     rev: 0.7.17
     hooks:
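
With the Black and blacken-docs hooks dropped, the Ruff hook (run with --fix and --preview) is the single lint entry point left in pre-commit. As a rough sketch of what the hook executes, assuming the ruff binary is installed (e.g. pip install ruff==0.1.15); this helper is hypothetical, not part of the repo:

import subprocess
import sys

def run_ruff(paths: list[str]) -> int:
    # Mirror the hook's flags: autofix what is safe, enable preview rules.
    return subprocess.run(["ruff", "check", "--fix", "--preview", *paths]).returncode

if __name__ == "__main__":
    sys.exit(run_ruff(sys.argv[1:] or ["."]))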

docs/source-pytorch/extensions/strategy.rst

@@ -77,7 +77,7 @@ The below table lists all relevant strategies available in Lightning with their
       - Strategy for multi-process single-device training on one or multiple nodes. :ref:`Learn more. <accelerators/gpu_intermediate:Distributed Data Parallel>`
    * - ddp_spawn
      - :class:`~lightning.pytorch.strategies.DDPStrategy`
-     - Same as "ddp" but launches processes using :func:`torch.multiprocessing.spawn` method and joins processes after training finishes. :ref:`Learn more. <accelerators/gpu_intermediate:Distributed Data Parallel Spawn>`
+     - Same as "ddp" but launches processes using ``torch.multiprocessing.spawn`` method and joins processes after training finishes. :ref:`Learn more. <accelerators/gpu_intermediate:Distributed Data Parallel Spawn>`
    * - deepspeed
      - :class:`~lightning.pytorch.strategies.DeepSpeedStrategy`
      - Provides capabilities to run training using the DeepSpeed library, with training optimizations for large billion parameter models. :doc:`Learn more. <../advanced/model_parallel/deepspeed>`
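
For context on the table row changed above: "ddp_spawn" is selected by string in the Trainer. A minimal sketch, assuming lightning is installed (CPU devices are used only to keep the example portable):

from lightning.pytorch import Trainer
from lightning.pytorch.demos.boring_classes import BoringModel

# "ddp_spawn" launches one process per device via torch.multiprocessing.spawn
# and joins the processes once fit() returns.
trainer = Trainer(strategy="ddp_spawn", accelerator="cpu", devices=2, max_epochs=1)
trainer.fit(BoringModel())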

pyproject.toml

@@ -93,6 +93,8 @@ ignore-init-module-imports = true
     "S113", # todo: Probable use of requests call without timeout
     "S301", # todo: `pickle` and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue
     "S324", # todo: Probable use of insecure hash functions in `hashlib`
+    "S403", # todo: `pickle`, `cPickle`, `dill`, and `shelve` modules are possibly insecure
+    "S404", # todo: `subprocess` module is possibly insecure
     "S602", # todo: `subprocess` call with `shell=True` identified, security issue
     "S603", # todo: `subprocess` call: check for execution of untrusted input
     "S605", # todo: Starting a process with a shell: seems safe, but may be changed in the future; consider rewriting without `shell`
@@ -108,6 +110,8 @@ ignore-init-module-imports = true
     "S311", # todo: Standard pseudo-random generators are not suitable for cryptographic purposes
     "S108", # todo: Probable insecure usage of temporary file or directory: "/tmp/sys-customizations-sync"
     "S202", # Uses of `tarfile.extractall()`
+    "S403", # `pickle`, `cPickle`, `dill`, and `shelve` modules are possibly insecure
+    "S404", # `subprocess` module is possibly insecure
     "S602", # todo: `subprocess` call with `shell=True` identified, security issue
     "S603", # todo: `subprocess` call: check for execution of untrusted input
     "S605", # todo: Starting a process with a shell: seems safe, but may be changed in the future; consider rewriting without `shell`

tests/tests_pytorch/utilities/test_compile.py

@@ -16,7 +16,6 @@ from unittest import mock
 import pytest
 import torch
-from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_1
 from lightning.pytorch import LightningModule, Trainer
 from lightning.pytorch.demos.boring_classes import BoringModel
 from lightning.pytorch.utilities.compile import from_compiled, to_uncompiled
@@ -26,7 +25,8 @@ from tests_pytorch.conftest import mock_cuda_count
 from tests_pytorch.helpers.runif import RunIf

-@pytest.mark.skipif(sys.platform == "darwin" and not _TORCH_GREATER_EQUAL_2_1, reason="Fix for MacOS in PyTorch 2.1")
+# https://github.com/pytorch/pytorch/issues/95708
+@pytest.mark.skipif(sys.platform == "darwin", reason="fatal error: 'omp.h' file not found")
 @RunIf(dynamo=True)
 @mock.patch("lightning.pytorch.trainer.call._call_and_handle_interrupt")
 def test_trainer_compiled_model(_, tmp_path, monkeypatch, mps_count_0):
@@ -112,7 +112,8 @@
     assert not has_dynamo(to_uncompiled_model.predict_step)

-@pytest.mark.skipif(sys.platform == "darwin" and not _TORCH_GREATER_EQUAL_2_1, reason="Fix for MacOS in PyTorch 2.1")
+# https://github.com/pytorch/pytorch/issues/95708
+@pytest.mark.skipif(sys.platform == "darwin", reason="fatal error: 'omp.h' file not found")
 @RunIf(dynamo=True)
 def test_trainer_compiled_model_that_logs(tmp_path):
     class MyModel(BoringModel):
@@ -137,7 +138,8 @@
     assert set(trainer.callback_metrics) == {"loss"}

-@pytest.mark.skipif(sys.platform == "darwin" and not _TORCH_GREATER_EQUAL_2_1, reason="Fix for MacOS in PyTorch 2.1")
+# https://github.com/pytorch/pytorch/issues/95708
+@pytest.mark.skipif(sys.platform == "darwin", reason="fatal error: 'omp.h' file not found")
 @RunIf(dynamo=True)
 def test_trainer_compiled_model_test(tmp_path):
     model = BoringModel()
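
The reworked skips above are unconditional on macOS because torch.compile's Inductor backend can fail its C++ build step there with "fatal error: 'omp.h' file not found" (pytorch/pytorch#95708), regardless of the PyTorch version. A minimal sketch of the operation the skipped tests exercise, assuming a PyTorch build with torch.compile support:

import torch

model = torch.nn.Linear(4, 4)
compiled = torch.compile(model)    # lazy: nothing is compiled yet
out = compiled(torch.randn(2, 4))  # the first call triggers the backend
                                   # build, where the omp.h error surfaces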