define Yapf config (#5591)
* define YAPF
* add check
* add check
* add temp ignore
* apply yapf
* ex
parent 3da28fd634
commit 99ea2a3b35

@@ -22,21 +22,21 @@ jobs:
      - name: Run isort
        run: isort --settings-path=./pyproject.toml --check-only --diff .

#  code-black:
#    name: Check code formatting with Black
#    runs-on: ubuntu-20.04
#    steps:
#      - name: Checkout
#        uses: actions/checkout@v2
#      - name: Set up Python 3.8
#        uses: actions/setup-python@v2
#        with:
#          python-version: 3.8
#      - name: Install Black
#        run: pip install black==19.10b0
#      - name: Run Black
#        run: echo "LGTM"
#        run black --skip-string-normalization --config=pyproject.toml --check .  # TODO, uncomment
  format-check-yapf:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@master
      - uses: actions/setup-python@v2
        with:
          python-version: 3.8
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install yapf
          pip list
        shell: bash
      - name: yapf
        run: yapf --diff --parallel --recursive .

  python-pep8:
    name: Python formatting PEP8
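
The format-check-yapf job fails whenever yapf would reformat a file: with --diff, yapf prints the pending diffs and exits non-zero. A minimal local-reproduction sketch (not part of the commit; the helper name is hypothetical), assuming yapf is installed:

# check_yapf.py -- hypothetical local helper, mirroring the CI step above
import subprocess
import sys

# Same invocation as the workflow step; "-m yapf" runs the installed yapf package.
result = subprocess.run(
    [sys.executable, "-m", "yapf", "--diff", "--parallel", "--recursive", "."],
    capture_output=True,
    text=True,
)
if result.returncode != 0:
    print(result.stdout)  # the unified diffs yapf would apply
    sys.exit(1)
print("yapf: no changes needed")
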
@@ -0,0 +1,10 @@
.git/*

# TODO
pl_examples/*

# TODO
pytorch_lightning/*

# TODO
tests/*
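
The entries above are shell-style glob patterns; the three TODO directories are skipped until they are yapf-clean. A minimal illustration (not part of the commit), using fnmatch as a stand-in for yapf's pattern matching:

# Demonstrates which paths the .yapfignore entries above would skip.
from fnmatch import fnmatch

patterns = ['.git/*', 'pl_examples/*', 'pytorch_lightning/*', 'tests/*']
for path in ('pytorch_lightning/trainer/trainer.py', 'benchmarks/test_basic_parity.py'):
    ignored = any(fnmatch(path, pat) for pat in patterns)
    print(path, '->', 'ignored' if ignored else 'checked')
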
@@ -54,6 +54,7 @@ include pyproject.toml
exclude *.yml
exclude *.yaml
exclude *.jsonnet
exclude .yapfignore

# Exclude pyright config
exclude .pyrightconfig.json
@@ -42,18 +42,21 @@ def assert_parity_absolute(pl_values, pt_values, norm_by: float = 1, max_diff: f


# ParityModuleMNIST runs with num_workers=1
@pytest.mark.parametrize('cls_model,max_diff_speed,max_diff_memory', [
    (ParityModuleRNN, 0.05, 0.0),
    (ParityModuleMNIST, 0.25, 0.0),  # todo: lower this thr
])
@pytest.mark.parametrize(
    'cls_model,max_diff_speed,max_diff_memory',
    [
        (ParityModuleRNN, 0.05, 0.0),
        (ParityModuleMNIST, 0.25, 0.0),  # todo: lower this thr
    ]
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_pytorch_parity(
        tmpdir,
        cls_model: LightningModule,
        max_diff_speed: float,
        max_diff_memory: float,
        num_epochs: int = 4,
        num_runs: int = 3,
    tmpdir,
    cls_model: LightningModule,
    max_diff_speed: float,
    max_diff_memory: float,
    num_epochs: int = 4,
    num_runs: int = 3,
):
    """
    Verify that the same pytorch and lightning models achieve the same results
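
The reformat above (one argument per line, closing brackets dedented) is yapf applying the style this PR adds to setup.cfg. A minimal sketch (not part of the commit) of driving the same transformation through yapf's Python API; the inline style string mirrors only a subset of the project config, so the exact output may differ:

from yapf.yapflib.yapf_api import FormatCode

SRC = (
    "@pytest.mark.parametrize('cls_model,max_diff_speed,max_diff_memory', [\n"
    "    (ParityModuleRNN, 0.05, 0.0),\n"
    "    (ParityModuleMNIST, 0.25, 0.0),\n"
    "])\n"
    "def test_pytorch_parity():\n"
    "    pass\n"
)

# FormatCode returns (formatted_source, changed); the source only needs to
# parse, so the undefined test names are fine here.
formatted, changed = FormatCode(
    SRC,
    style_config='{based_on_style: pep8, column_limit: 120, '
    'coalesce_brackets: true, dedent_closing_brackets: true}',
)
print(formatted)
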
@@ -100,8 +100,9 @@ def test_ddp_string_sharded_plugin_correctness_amp_multi_gpu():

@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.skipif(not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1',
                    reason="test should be run outside of pytest")
@pytest.mark.skipif(
    not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
)
@DDPLauncher.run("--accelerator ddp --gpus 2 --precision 32")
def test_ddp_sharded_plugin_correctness_multi_gpu_ddp(tmpdir, args=None):
    plugin_parity_test(
@@ -115,8 +116,9 @@ def test_ddp_sharded_plugin_correctness_multi_gpu_ddp(tmpdir, args=None):

@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.skipif(not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1',
                    reason="test should be run outside of pytest")
@pytest.mark.skipif(
    not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
)
@DDPLauncher.run("--accelerator ddp --gpus 2 --precision 16")
def test_ddp_sharded_plugin_correctness_amp_multi_gpu_ddp(tmpdir, args=None):
    plugin_parity_test(
@@ -173,6 +175,7 @@ class SeedTrainLoaderModel(BoringModel):


class SeedTrainLoaderManualModel(SeedTrainLoaderModel):

    def training_step(self, batch, batch_idx, optimizer_idx):
        # manual
        # access your optimizers with use_pl_optimizer=False. Default is True
@@ -209,6 +212,7 @@ class SeedTrainLoaderManualModel(SeedTrainLoaderModel):


class SeedTrainLoaderMultipleOptimizersModel(SeedTrainLoaderModel):

    def training_step(self, batch, batch_idx, optimizer_idx):
        output = self.layer(batch)
        loss = self.loss(batch, output)
@@ -247,7 +251,7 @@ def record_ddp_fit_model_stats(trainer, model, use_cuda):

    if use_cuda:
        torch.cuda.synchronize()
        max_memory = torch.cuda.max_memory_allocated() / 2 ** 20
        max_memory = torch.cuda.max_memory_allocated() / 2**20

    total_time = time.perf_counter() - time_start
@@ -255,13 +259,13 @@ def record_ddp_fit_model_stats(trainer, model, use_cuda):


def plugin_parity_test(
        model_cls: Type[SeedTrainLoaderModel],
        plugin: Union[str, DDPPlugin],
        seed: int = 42,
        accelerator: str = 'ddp_spawn',
        gpus: int = 0,
        precision: int = 32,
        max_percent_speed_diff: float = 0.1,
    model_cls: Type[SeedTrainLoaderModel],
    plugin: Union[str, DDPPlugin],
    seed: int = 42,
    accelerator: str = 'ddp_spawn',
    gpus: int = 0,
    precision: int = 32,
    max_percent_speed_diff: float = 0.1,
):
    """
    Ensures that the trained model is identical to the standard DDP implementation.
@@ -292,11 +296,7 @@ def plugin_parity_test(
        accelerator=accelerator,
    )

    max_memory_ddp, ddp_time = record_ddp_fit_model_stats(
        trainer=trainer,
        model=ddp_model,
        use_cuda=use_cuda
    )
    max_memory_ddp, ddp_time = record_ddp_fit_model_stats(trainer=trainer, model=ddp_model, use_cuda=use_cuda)

    # Reset and train Custom DDP
    seed_everything(seed)
@@ -312,9 +312,7 @@ def plugin_parity_test(
    )

    max_memory_custom, custom_model_time = record_ddp_fit_model_stats(
        trainer=trainer,
        model=custom_plugin_model,
        use_cuda=use_cuda
        trainer=trainer, model=custom_plugin_model, use_cuda=use_cuda
    )

    # Assert model parameters are identical after fit
@@ -72,7 +72,8 @@ for md in glob.glob(os.path.join(PATH_ROOT, '.github', '*.md')):
    shutil.copy(md, os.path.join(PATH_HERE, FOLDER_GENERATED, os.path.basename(md)))
# copy also the changelog
_transform_changelog(
    os.path.join(PATH_ROOT, 'CHANGELOG.md'), os.path.join(PATH_HERE, FOLDER_GENERATED, 'CHANGELOG.md')
    os.path.join(PATH_ROOT, 'CHANGELOG.md'),
    os.path.join(PATH_HERE, FOLDER_GENERATED, 'CHANGELOG.md'),
)

# -- Project information -----------------------------------------------------
@@ -86,7 +87,6 @@ version = pytorch_lightning.__version__
# The full version, including alpha/beta/rc tags
release = pytorch_lightning.__version__


# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
@@ -202,7 +202,6 @@ html_static_path = ['_images', '_templates', '_static']
#
# html_sidebars = {}


# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
@@ -235,9 +234,7 @@ latex_documents = [

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, project, project + ' Documentation', [author], 1)
]
man_pages = [(master_doc, project, project + ' Documentation', [author], 1)]

# -- Options for Texinfo output ----------------------------------------------
@@ -245,8 +242,15 @@ man_pages = [
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, project, project + ' Documentation', author, project,
     'One line description of project.', 'Miscellaneous'),
    (
        master_doc,
        project,
        project + ' Documentation',
        author,
        project,
        'One line description of project.',
        'Miscellaneous',
    ),
]

# -- Options for Epub output -------------------------------------------------
@@ -22,6 +22,7 @@ PATH_LEGACY = os.path.dirname(__file__)


class RandomDataset(Dataset):

    def __init__(self, size, length: int = 100):
        self.len = length
        self.data = torch.randn(length, size)
setup.cfg | 18
@@ -13,7 +13,6 @@
# limitations under the License.

[tool:pytest]

norecursedirs =
    .git
    dist
@@ -32,6 +31,7 @@ markers =
    gpus_param_tests
junit_duration_report = call


[coverage:report]
exclude_lines =
    pragma: no-cover
@@ -54,6 +54,7 @@ omit =
    pytorch_lightning/utilities/distributed.py
    pytorch_lightning/tuner/auto_gpu_select.py


[flake8]
# TODO: this should be 88 or 100 according PEP8
max-line-length = 120
@@ -70,6 +71,7 @@ ignore =
    E231  # missing whitespace after ',', ';', or ':'; for black
    W503  # line break before binary operator, need for black


# setup.cfg or tox.ini
[check-manifest]
ignore =
@@ -78,11 +80,13 @@ ignore =
    .github/*
    .circleci


[metadata]
license_file = LICENSE
# long_description = file:README.md
# long_description_content_type = text/markdown


[pydocstyle]
convention = pep257
# D104, D107: Ignore missing docstrings in __init__ files and methods.
@@ -91,6 +95,18 @@ add-ignore = D104,D107,D202
max-line-length = 120


[yapf]
based_on_style = pep8
spaces_before_comment = 2
split_before_logical_operator = true
COLUMN_LIMIT = 120
COALESCE_BRACKETS = true
DEDENT_CLOSING_BRACKETS = true
ALLOW_SPLIT_BEFORE_DICT_VALUE = false
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS = false


[mypy]
# Typing tests is low priority, but enabling type checking on the
# untyped test functions (using `--check-untyped-defs`) is still
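
Run from the repository root, yapf discovers this [yapf] section in setup.cfg on its own, so neither the CI job nor local runs pass a --style flag. A minimal sketch (not part of the commit) of the same style applied through the Python API, reusing the 2 ** 20 cleanup from the benchmarks hunk above; yapf's pep8 base leaves the power operator unspaced by default:

from yapf.yapflib.yapf_api import FormatCode

formatted, changed = FormatCode(
    'max_memory = torch.cuda.max_memory_allocated() / 2 ** 20\n',
    style_config='{based_on_style: pep8, column_limit: 120}',
)
# Should print the operator without spaces: ... / 2**20
print(formatted.strip())
print(changed)
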
setup.py | 8
@@ -46,9 +46,7 @@ extras['dev'] = extras['extra'] + extras['loggers'] + extras['test']
extras['all'] = extras['dev'] + extras['examples']  # + extras['docs']

# These packages shall be installed only on GPU machines
PACKAGES_GPU_ONLY = (
    'horovod',
)
PACKAGES_GPU_ONLY = ['horovod']
# create a version for CPU machines
for ex in ('cpu', 'cpu-extra'):
    kw = ex.split('-')[1] if '-' in ex else 'all'
@@ -70,24 +68,20 @@ setup(
    download_url='https://github.com/PyTorchLightning/pytorch-lightning',
    license=pytorch_lightning.__license__,
    packages=find_packages(exclude=['tests', 'tests/*', 'benchmarks', 'legacy', 'legacy/*']),

    long_description=_load_readme_description(PATH_ROOT),
    long_description_content_type='text/markdown',
    include_package_data=True,
    zip_safe=False,

    keywords=['deep learning', 'pytorch', 'AI'],
    python_requires='>=3.6',
    setup_requires=[],
    install_requires=_load_requirements(PATH_ROOT),
    extras_require=extras,

    project_urls={
        "Bug Tracker": "https://github.com/PyTorchLightning/pytorch-lightning/issues",
        "Documentation": "https://pytorch-lightning.rtfd.io/en/latest/",
        "Source Code": "https://github.com/PyTorchLightning/pytorch-lightning",
    },

    classifiers=[
        'Environment :: Console',
        'Natural Language :: English',