Standalone Lite CI setup (#14451)

Co-authored-by: Jirka <jirka.borovec@seznam.cz>
Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>
Co-authored-by: Jirka Borovec <Borda@users.noreply.github.com>
Adrian Wälchli authored on 2022-09-02 00:13:12 +02:00, committed by GitHub
parent e0c2c3e677
commit 291dc1b615
25 changed files with 601 additions and 6 deletions

.azure/gpu-tests-lite.yml (new file, 101 lines)

@ -0,0 +1,101 @@
# Python package
# Create and test a Python package on multiple Python versions.
# Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/python
trigger:
tags:
include:
- '*'
branches:
include:
- "master"
- "release/*"
- "refs/tags/*"
paths:
include:
- ".azure/gpu-tests-lite.yml"
- "requirements/lite/**"
- "src/lightning_lite/**"
- "tests/tests_lite/**"
pr:
- "master"
- "release/*"
jobs:
- job: testing
# how long to run the job before automatically cancelling
timeoutInMinutes: "20"
# how much time to give 'run always even if cancelled tasks' before stopping them
cancelTimeoutInMinutes: "2"
pool: azure-jirka-spot
container:
image: "pytorchlightning/pytorch_lightning:base-cuda-py3.9-torch1.12-cuda11.6.1"
# default shm size is 64m. Increase it to avoid:
# 'Error while creating shared memory: unhandled system error, NCCL version 2.7.8'
options: "--runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all --shm-size=512m"
workspace:
clean: all
steps:
- bash: |
lspci | egrep 'VGA|3D'
whereis nvidia
nvidia-smi
which python && which pip
python --version
pip --version
pip list
displayName: 'Image info & NVIDIA'
- bash: |
set -e
TORCH_VERSION=$(python -c "import torch; print(torch.__version__.split('+')[0])")
CUDA_VERSION_MM=$(python -c "import torch ; print(''.join(map(str, torch.version.cuda.split('.')[:2])))")
python ./requirements/pytorch/adjust-versions.py requirements/lite/base.txt ${TORCH_VERSION}
pip install -e .[strategies] --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html
pip install --requirement requirements/pytorch/devel.txt --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html
pip list
env:
PACKAGE_NAME: pytorch
FREEZE_REQUIREMENTS: 1
displayName: 'Install dependencies'
- bash: |
set -e
python requirements/collect_env_details.py
python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu >= 2, f'GPU: {mgpu}'"
displayName: 'Env details'
- bash: python -m coverage run --source lightning_lite -m pytest --ignore benchmarks -v --junitxml=$(Build.StagingDirectory)/test-results.xml --durations=50
env:
PL_RUN_CUDA_TESTS: "1"
workingDirectory: tests/tests_lite
displayName: 'Testing: Lite standard'
timeoutInMinutes: "10"
- bash: bash run_standalone_tests.sh
workingDirectory: tests/tests_lite
env:
PL_USE_MOCKED_MNIST: "1"
PL_RUN_CUDA_TESTS: "1"
PL_STANDALONE_TESTS_SOURCE: "lightning_lite"
displayName: 'Testing: Lite standalone tests'
timeoutInMinutes: "10"
- bash: |
python -m coverage report
python -m coverage xml
python -m coverage html
python -m codecov --token=$(CODECOV_TOKEN) --commit=$(Build.SourceVersion) --flags=gpu,pytest --name="GPU-coverage" --env=linux,azure
ls -l
workingDirectory: tests/tests_lite
displayName: 'Statistics'
- task: PublishTestResults@2
displayName: 'Publish test results'
inputs:
testResultsFiles: '$(Build.StagingDirectory)/test-results.xml'
testRunTitle: '$(Agent.OS) - $(Build.DefinitionName) - Python $(python.version)'
condition: succeededOrFailed()
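The "Testing: Lite standard" step above reduces to a coverage-wrapped pytest run inside tests/tests_lite. A minimal local sketch of the same invocation, assuming a checkout with the Lite requirements installed and at least two CUDA GPUs (the JUnit and ignore flags from the pipeline are omitted here):

# rough local equivalent of the "Testing: Lite standard" step, run from the repo root
cd tests/tests_lite
export PL_RUN_CUDA_TESTS=1   # opt in to the CUDA-only tests, as the pipeline does
python -m coverage run --source lightning_lite -m pytest -v --durations=50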


@ -119,6 +119,7 @@ jobs:
env:
PL_USE_MOCKED_MNIST: "1"
PL_RUN_CUDA_TESTS: "1"
PL_STANDALONE_TESTS_SOURCE: "pytorch_lightning"
displayName: 'Testing: PyTorch standalone tests'
timeoutInMinutes: "35"
condition: eq(variables['continue'], '1')


@ -41,6 +41,15 @@ subprojects:
- "pl-cpu (windows-2022, 3.10, latest, stable)"
- "pl-cpu (windows-2022, 3.7, latest, stable)"
- "pl-cpu (windows-2022, 3.7, oldest, stable)"
- "lite-cpu (macOS-11, 3.10, latest, stable)"
- "lite-cpu (macOS-11, 3.7, latest, stable)"
- "lite-cpu (macOS-11, 3.7, oldest, stable)"
- "lite-cpu (ubuntu-20.04, 3.10, latest, stable)"
- "lite-cpu (ubuntu-20.04, 3.7, latest, stable)"
- "lite-cpu (ubuntu-20.04, 3.7, oldest, stable)"
- "lite-cpu (windows-2022, 3.10, latest, stable)"
- "lite-cpu (windows-2022, 3.7, latest, stable)"
- "lite-cpu (windows-2022, 3.7, oldest, stable)"
- "make-doctest (pytorch)"
- "make-html (pytorch)"
- "mypy"
@ -60,6 +69,13 @@ subprojects:
checks:
- "pytorch-lightning (GPUs)"
- id: "lightning_lite: Azure GPU"
paths:
- ".azure/gpu-tests-lite.yml"
- "tests/tests_lite/run_standalone_*.sh"
checks:
- "lightning-lite (GPUs)"
- id: "pytorch_lightning: Azure HPU"
paths:
- ".azure/hpu-tests.yml"

.github/workflows/ci-lite-test-full.yml (new file, 120 lines)

@ -0,0 +1,120 @@
name: Test Lite full
# see: https://help.github.com/en/actions/reference/events-that-trigger-workflows
on: # Trigger the workflow on push or pull request, but only for the master branch
push:
branches: [master, "release/*"]
pull_request:
branches: [master, "release/*"]
types: [opened, reopened, ready_for_review, synchronize]
paths:
- "requirements/lite/**"
- "src/lightning_lite/**"
- "tests/tests_lite/**"
- "setup.cfg" # includes pytest config
- ".github/workflows/ci-lite-test-full.yml"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
cancel-in-progress: ${{ ! (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release/')) }}
jobs:
lite-cpu:
runs-on: ${{ matrix.os }}
if: github.event.pull_request.draft == false
strategy:
fail-fast: false
matrix:
os: [ubuntu-20.04, windows-2022, macOS-11]
python-version: ["3.7", "3.10"] # minimum, maximum
requires: ["oldest", "latest"]
release: ["stable"]
exclude:
# There's no distribution of the oldest PyTorch 1.9 for Python 3.10.
# TODO: Remove the exclusion when dropping PyTorch 1.9 support.
- {python-version: "3.10", requires: "oldest"}
timeout-minutes: 40
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Reset caching
run: python -c "import time; days = time.time() / 60 / 60 / 24; print(f'TIME_PERIOD=d{int(days / 2) * 2}')" >> $GITHUB_ENV
- name: basic setup
run: |
pip --version
pip install -q fire
- name: Setup Windows
if: runner.os == 'windows'
run: |
python .actions/assistant.py requirements_prune_pkgs horovod
- name: Set min. dependencies
if: matrix.requires == 'oldest'
run: |
python .actions/assistant.py replace_oldest_ver
# Note: This uses an internal pip API and may not always work
# https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow
- name: Get pip cache dir
id: pip-cache
run: echo "::set-output name=dir::$(pip cache dir)"
- name: pip cache
uses: actions/cache@v3
with:
path: ${{ steps.pip-cache.outputs.dir }}
key: ${{ runner.os }}-pip-td${{ env.TIME_PERIOD }}-py${{ matrix.python-version }}-${{ matrix.release }}-${{ matrix.requires }}-${{ hashFiles('requirements/lite/*.txt') }}
restore-keys: |
${{ runner.os }}-pip-td${{ env.TIME_PERIOD }}-py${{ matrix.python-version }}-${{ matrix.release }}-${{ matrix.requires }}-
- name: Install dependencies
env:
PACKAGE_NAME: pytorch # TODO(lite) does this need to say lite?
FREEZE_REQUIREMENTS: 1
run: |
flag=$(python -c "print('--pre' if '${{matrix.release}}' == 'pre' else '')" 2>&1)
url=$(python -c "print('test/cpu/torch_test.html' if '${{matrix.release}}' == 'pre' else 'cpu/torch_stable.html')" 2>&1)
pip install -e .[test] --upgrade $flag --find-links "https://download.pytorch.org/whl/${url}"
pip list
shell: bash
- name: Testing Lite
working-directory: tests/tests_lite
# NOTE: do not include coverage report here, see: https://github.com/nedbat/coveragepy/issues/1003
run: coverage run --source lightning_lite -m pytest -v --durations=50 --junitxml=results-${{ runner.os }}-py${{ matrix.python-version }}-${{ matrix.requires }}-${{ matrix.release }}.xml
- name: Upload pytest results
if: failure()
uses: actions/upload-artifact@v3
with:
name: unittest-results-${{ runner.os }}-py${{ matrix.python-version }}-${{ matrix.requires }}-${{ matrix.release }}
path: tests/tests_lite/results-${{ runner.os }}-py${{ matrix.python-version }}-${{ matrix.requires }}-${{ matrix.release }}.xml
- name: Statistics
if: success()
working-directory: tests/tests_lite
run: |
coverage report
coverage xml
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
if: always()
# see: https://github.com/actions/toolkit/issues/399
continue-on-error: true
with:
token: ${{ secrets.CODECOV_TOKEN }}
file: tests/tests_lite/coverage.xml
flags: cpu,pytest,python${{ matrix.python-version }}
name: CPU-coverage
fail_ci_if_error: false
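For the stable matrix entries, the install step above resolves to an empty pre-release flag and the cpu/torch_stable.html wheel index. A hedged sketch of reproducing that CPU run locally (values copied from the workflow steps; the [test] extra is assumed to resolve through the pytorch package, as the TODO comment in the step notes):

# stable channel: no --pre flag, CPU wheel index (mirrors the "Install dependencies" step)
export PACKAGE_NAME=pytorch
export FREEZE_REQUIREMENTS=1
pip install -e .[test] --upgrade --find-links "https://download.pytorch.org/whl/cpu/torch_stable.html"
# run the CPU suite the same way the "Testing Lite" step does
cd tests/tests_lite
coverage run --source lightning_lite -m pytest -v --durations=50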


@ -34,7 +34,7 @@ jobs:
max-parallel: 1
matrix:
os: [ubuntu-20.04, macOS-11, windows-2022]
pkg: ["app", "pytorch"]
pkg: ["app", "lite", "pytorch"]
python-version: [3.8] # , 3.9
steps:
@ -110,6 +110,7 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Download package
# todo: download also lite after it is first published
run: |
pip install -q fire requests
for pkg in 'app' 'pytorch' ; do


@ -43,7 +43,8 @@ repos:
docs/source-pytorch/_static/images/general/pl_overview_flat.jpg|
docs/source-pytorch/_static/images/general/pl_overview.gif|
src/lightning_app/cli/pl-app-template/ui/yarn.lock|
src/pytorch_lightning/CHANGELOG.md
src/pytorch_lightning/CHANGELOG.md|
src/lightning_lite/CHANGELOG.md
)$
- id: detect-private-key
@ -98,7 +99,8 @@ repos:
exclude: |
(?x)^(
src/pytorch_lightning/CHANGELOG.md|
src/lightning_app/CHANGELOG.md
src/lightning_app/CHANGELOG.md|
src/lightning_lite/CHANGELOG.md
)$
- repo: https://github.com/PyCQA/flake8


@ -39,6 +39,7 @@ local tputests = base.BaseTest {
cd tests/tests_pytorch
coverage run --source=pytorch_lightning -m pytest -vv --durations=0 ./
echo "\n||| Running standalone tests |||\n"
export PL_STANDALONE_TESTS_SOURCE=pytorch_lightning
export PL_STANDALONE_TESTS_BATCH_SIZE=1
bash run_standalone_tests.sh
echo "\n||| END PYTEST LOGS |||\n"


@ -9,6 +9,7 @@ requires = [
known_first_party = [
"pl_examples",
"pytorch_lightning",
"lightning_lite",
"tests_pytorch",
]
profile = "black"


@ -0,0 +1,7 @@
# NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
torch>=1.9.0, <1.13.0
fsspec[http]>=2021.05.0, !=2021.06.0, <2022.6.0
packaging>=17.0, <=21.3
typing-extensions>=4.0.0, <4.3.1
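These pins are the CI-stability upper bounds described in the header comment; the GPU job additionally rewrites the torch pin to match the installed build before installing. A hedged sketch of that flow, assuming a CUDA-enabled torch is already present in the environment:

# align the torch pin with the installed build, then install the mandatory lite dependencies
TORCH_VERSION=$(python -c "import torch; print(torch.__version__.split('+')[0])")
python ./requirements/pytorch/adjust-versions.py requirements/lite/base.txt ${TORCH_VERSION}
pip install -r requirements/lite/base.txt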


@ -0,0 +1,2 @@
# install all mandatory dependencies
-r ./base.txt


@ -0,0 +1,5 @@
# NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
fairscale>=0.4.5, <=0.4.6
deepspeed>=0.6.0, <=0.7.0


@ -54,7 +54,7 @@ from types import ModuleType
from setuptools import setup
_PACKAGE_NAME = os.environ.get("PACKAGE_NAME", "")
_PACKAGE_MAPPING = {"pytorch": "pytorch_lightning", "app": "lightning_app"}
_PACKAGE_MAPPING = {"pytorch": "pytorch_lightning", "app": "lightning_app", "lite": "lightning_lite"}
_REAL_PKG_NAME = _PACKAGE_MAPPING.get(_PACKAGE_NAME, _PACKAGE_NAME)
# https://packaging.python.org/guides/single-sourcing-package-version/
# http://blog.ionelmc.ro/2014/05/25/python-packaging/
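With the mapping extended, the monorepo root setup dispatches to lightning_lite purely through the PACKAGE_NAME environment variable. A hedged sketch of selecting the new package for an editable install (whether every extra is already wired up for lite at this point is still marked TODO elsewhere in the commit):

# choose the lightning_lite package when installing from the monorepo root
export PACKAGE_NAME=lite
export FREEZE_REQUIREMENTS=1   # keep the pinned upper bounds, as the CI jobs do
pip install -e .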


@ -0,0 +1,28 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
## [0.0.x] - 2022-MM-DD
### Added
-
### Changed
-
### Deprecated
-
### Removed
-
### Fixed
-


@ -0,0 +1 @@
# TODO


@ -0,0 +1,23 @@
import time
__author__ = "Lightning AI et al."
__author_email__ = "pytorch@lightning.ai"
__license__ = "Apache-2.0"
__copyright__ = f"Copyright (c) 2022-{time.strftime('%Y')}, {__author__}."
__homepage__ = "https://github.com/Lightning-AI/lightning"
__docs_url__ = "https://pytorch-lightning.readthedocs.io/en/stable/"
# TODO
__docs__ = ""
__long_docs__ = """
"""
__all__ = [
"__author__",
"__author_email__",
"__copyright__",
"__docs__",
"__docs_url__",
"__homepage__",
"__license__",
]


@ -0,0 +1,4 @@
"""Root package info."""
from lightning_lite.__about__ import * # noqa: F401, F403
from lightning_lite.__version__ import version as __version__ # noqa: F401
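A quick smoke check that the new package imports and exposes the placeholder version defined in __version__.py below (illustrative only, not part of the commit):

# confirm the package is importable and reports the dev version
python -c "import lightning_lite; print(lightning_lite.__version__)"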


@ -0,0 +1,98 @@
import os
from importlib.util import module_from_spec, spec_from_file_location
from types import ModuleType
from typing import Any, Dict
from setuptools import find_packages
_PROJECT_ROOT = "."
_SOURCE_ROOT = os.path.join(_PROJECT_ROOT, "src")
_PACKAGE_ROOT = os.path.join(_SOURCE_ROOT, "lightning_lite")
_PATH_REQUIREMENTS = os.path.join("requirements", "lite")
_FREEZE_REQUIREMENTS = bool(int(os.environ.get("FREEZE_REQUIREMENTS", 0)))
def _load_py_module(name: str, location: str) -> ModuleType:
spec = spec_from_file_location(name, location)
assert spec, f"Failed to load module {name} from {location}"
py = module_from_spec(spec)
assert spec.loader, f"ModuleSpec.loader is None for {name} from {location}"
spec.loader.exec_module(py)
return py
def _adjust_manifest(**__: Any) -> None:
manifest_path = os.path.join(_PROJECT_ROOT, "MANIFEST.in")
assert os.path.isfile(manifest_path)
with open(manifest_path) as fp:
lines = fp.readlines()
lines += [
"recursive-exclude src *.md" + os.linesep,
"recursive-exclude requirements *.txt" + os.linesep,
"recursive-include requirements/lite *.txt" + os.linesep,
"recursive-include src/lightning_lite *.md" + os.linesep,
]
# TODO: remove this once lightning-ui package is ready as a dependency
lines += ["recursive-include src/lightning_app/ui *" + os.linesep]
with open(manifest_path, "w") as fp:
fp.writelines(lines)
def _setup_args(**__: Any) -> Dict[str, Any]:
_path_setup_tools = os.path.join(_PROJECT_ROOT, ".actions", "setup_tools.py")
_setup_tools = _load_py_module("setup_tools", _path_setup_tools)
_about = _load_py_module("about", os.path.join(_PACKAGE_ROOT, "__about__.py"))
_version = _load_py_module("version", os.path.join(_PACKAGE_ROOT, "__version__.py"))
_long_description = _setup_tools.load_readme_description(
_PACKAGE_ROOT, homepage=_about.__homepage__, version=_version.version
)
return dict(
name="lightning-lite",
version=_version.version, # todo: consider using date version + branch for installation from source
description=_about.__docs__,
author=_about.__author__,
author_email=_about.__author_email__,
url=_about.__homepage__,
download_url="https://github.com/Lightning-AI/lightning",
license=_about.__license__,
packages=find_packages(where="src", include=["lightning_lite", "lightning_lite.*"]),
package_dir={"": "src"},
long_description=_long_description,
long_description_content_type="text/markdown",
include_package_data=True,
zip_safe=False,
keywords=["deep learning", "pytorch", "AI"],
python_requires=">=3.7",
setup_requires=["wheel"],
install_requires=_setup_tools.load_requirements(_PATH_REQUIREMENTS, unfreeze=not _FREEZE_REQUIREMENTS),
# extras_require=_prepare_extras(), # todo
project_urls={
"Bug Tracker": "https://github.com/Lightning-AI/lightning/issues",
"Documentation": "https://pytorch-lightning.rtfd.io/en/latest/",
"Source Code": "https://github.com/Lightning-AI/lightning",
},
classifiers=[
"Environment :: Console",
"Natural Language :: English",
# How mature is this project? Common values are
# 3 - Alpha, 4 - Beta, 5 - Production/Stable
"Development Status :: 4 - Beta",
# Indicate who your project is intended for
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Information Analysis",
# Pick your license as you wish
# 'License :: OSI Approved :: BSD License',
"Operating System :: OS Independent",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
)
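This per-package __setup__.py is what the root setup.py loads when PACKAGE_NAME=lite, so building a lightning-lite distribution also goes through the monorepo root. A hedged sketch (the exact build entry points may differ once the package is first published, as the ci-pkg-install TODO notes):

# build a lightning-lite source distribution and wheel from the repo root
export PACKAGE_NAME=lite
python setup.py sdist bdist_wheel
ls -l dist/   # expect lightning-lite artifacts named after the dev version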


@ -0,0 +1 @@
version = "0.0.0dev"


@ -0,0 +1,3 @@
class LightningLite:
# Placeholder for real implementation
pass


@ -0,0 +1,75 @@
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
# TODO(lite): Add all RunIf conditions once the relevant utilities have moved to lite source dir
class RunIf:
"""RunIf wrapper for simple marking specific cases, fully compatible with pytest.mark::
@RunIf(min_torch="0.0")
@pytest.mark.parametrize("arg1", [1, 2.0])
def test_wrapper(arg1):
assert arg1 > 0.0
"""
def __new__(
self,
*args,
min_cuda_gpus: int = 0,
standalone: bool = False,
**kwargs,
):
"""
Args:
*args: Any :class:`pytest.mark.skipif` arguments.
min_cuda_gpus: Require this number of gpus and that the ``PL_RUN_CUDA_TESTS=1`` environment variable is set.
standalone: Mark the test as standalone, our CI will run it in a separate process.
This requires that the ``PL_RUN_STANDALONE_TESTS=1`` environment variable is set.
**kwargs: Any :class:`pytest.mark.skipif` keyword arguments.
"""
conditions = []
reasons = []
if min_cuda_gpus:
conditions.append(torch.cuda.device_count() < min_cuda_gpus)
reasons.append(f"GPUs>={min_cuda_gpus}")
# used in conftest.py::pytest_collection_modifyitems
kwargs["min_cuda_gpus"] = True
if standalone:
env_flag = os.getenv("PL_RUN_STANDALONE_TESTS", "0")
conditions.append(env_flag != "1")
reasons.append("Standalone execution")
# used in conftest.py::pytest_collection_modifyitems
kwargs["standalone"] = True
reasons = [rs for cond, rs in zip(conditions, reasons) if cond]
return pytest.mark.skipif(
*args, condition=any(conditions), reason=f"Requires: [{' + '.join(reasons)}]", **kwargs
)
@RunIf(min_torch="99")
def test_always_skip():
exit(1)
@pytest.mark.parametrize("arg1", [0.5, 1.0, 2.0])
@RunIf(min_torch="0.0")
def test_wrapper(arg1: float):
assert arg1 > 0.0
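Tests marked with standalone=True are skipped unless PL_RUN_STANDALONE_TESTS=1, which is exactly the variable run_standalone_tests.sh exports. A hedged way to run one such test by hand, outside the batching script (the -k name match and the environment mirror the Azure job and are assumptions, not part of the commit):

# run standalone-marked lite tests directly; the GPU-marked case needs >= 2 CUDA GPUs
cd tests/tests_lite
export PL_RUN_STANDALONE_TESTS=1
export PL_RUN_CUDA_TESTS=1
python -m pytest -v -k "standalone"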


@ -0,0 +1,92 @@
#!/bin/bash
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
# THIS FILE ASSUMES IT IS RUN INSIDE THE tests/tests_<package> DIRECTORY
# Batch size for testing: Determines how many standalone test invocations run in parallel
# It can be set through the env variable PL_STANDALONE_TESTS_BATCH_SIZE and defaults to 6 if not set
test_batch_size="${PL_STANDALONE_TESTS_BATCH_SIZE:-6}"
source="${PL_STANDALONE_TESTS_SOURCE}"
# this environment variable allows special tests to run
export PL_RUN_STANDALONE_TESTS=1
# python arguments
defaults="-m coverage run --source $source --append -m pytest --no-header"
# find tests marked as `@RunIf(standalone=True)`. done manually instead of with pytest because it is faster
grep_output=$(grep --recursive --word-regexp . --regexp 'standalone=True' --include '*.py')
# file paths, remove duplicates
files=$(echo "$grep_output" | cut -f1 -d: | sort | uniq)
# get the list of parametrizations. we need to call them separately. the last two lines are removed.
# note: if there's a syntax error, this will fail with some garbled output
if [[ "$OSTYPE" == "darwin"* ]]; then
parametrizations=$(python -m pytest $files --collect-only --quiet "$@" | tail -r | sed -e '1,3d' | tail -r)
else
parametrizations=$(python -m pytest $files --collect-only --quiet "$@" | head -n -2)
fi
# remove the "tests/tests_lite" path suffixes
parametrizations=${parametrizations//"tests/tests_lite/"/}
parametrizations_arr=($parametrizations)
# tests to skip - space separated
blocklist='utilities/test_warnings.py'
report=''
rm -f standalone_test_output.txt # in case it exists, remove it
function show_batched_output {
if [ -f standalone_test_output.txt ]; then # if exists
cat standalone_test_output.txt
rm standalone_test_output.txt
fi
}
trap show_batched_output EXIT # show the output on exit
for i in "${!parametrizations_arr[@]}"; do
parametrization=${parametrizations_arr[$i]}
# check blocklist
if echo $blocklist | grep -F "${parametrization}"; then
report+="Skipped\t$parametrization\n"
# do not continue the loop because we might need to wait for batched jobs
else
echo "Running $parametrization"
# execute the test in the background
# redirect to a log file that buffers test output. since the tests will run in the background, we cannot let them
# output to std{out,err} because the outputs would be garbled together
python ${defaults} "$parametrization" &>> standalone_test_output.txt &
# save the PID in an array
pids[${i}]=$!
# add row to the final report
report+="Ran\t$parametrization\n"
fi
if ((($i + 1) % $test_batch_size == 0)); then
# wait for running tests
for pid in ${pids[*]}; do wait $pid; done
unset pids # empty the array
show_batched_output
fi
done
# wait for leftover tests
for pid in ${pids[*]}; do wait $pid; done
show_batched_output
# echo test report
printf '=%.s' {1..80}
printf "\n$report"
printf '=%.s' {1..80}
printf '\n'
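The Azure "Lite standalone tests" step drives this script with a few environment variables; a hedged local equivalent is below (the reduced batch size is an illustrative assumption, not something the commit prescribes):

# invoke the batched standalone runner the way the Azure job does
cd tests/tests_lite
export PL_RUN_CUDA_TESTS=1
export PL_STANDALONE_TESTS_SOURCE=lightning_lite
export PL_STANDALONE_TESTS_BATCH_SIZE=2   # default is 6
bash run_standalone_tests.sh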


@ -0,0 +1,12 @@
from tests_lite.helpers.runif import RunIf
from lightning_lite.lite import LightningLite # noqa: F401
def test_placeholder(tmpdir):
assert True
@RunIf(min_cuda_gpus=2, standalone=True)
def test_placeholder_standalone(tmpdir):
assert True


@ -13,16 +13,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
# THIS FILE ASSUMES IT IS RUN INSIDE THE tests/tests_pytorch DIRECTORY
# THIS FILE ASSUMES IT IS RUN INSIDE THE tests/tests_<package> DIRECTORY
# Batch size for testing: Determines how many standalone test invocations run in parallel
# It can be set through the env variable PL_STANDALONE_TESTS_BATCH_SIZE and defaults to 6 if not set
test_batch_size="${PL_STANDALONE_TESTS_BATCH_SIZE:-6}"
source="${PL_STANDALONE_TESTS_SOURCE}"
# this environment variable allows special tests to run
export PL_RUN_STANDALONE_TESTS=1
# python arguments
defaults='-m coverage run --source pytorch_lightning --append -m pytest --no-header'
defaults="-m coverage run --source $source --append -m pytest --no-header"
# find tests marked as `@RunIf(standalone=True)`. done manually instead of with pytest because it is faster
grep_output=$(grep --recursive --word-regexp . --regexp 'standalone=True' --include '*.py')