# Python package
# Create and test a Python package on multiple Python versions.
# Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/python

trigger:
  tags:
    include: ['*']
  branches:
    include:
      - "master"
      - "release/*"
      - "refs/tags/*"

pr:
  branches:
    include:
      - "master"
      - "release/*"
  paths:
    include:
      - ".actions/*"
      - ".azure/gpu-tests-pytorch.yml"
      - "examples/run_pl_examples.sh"
      - "examples/pytorch/basics/backbone_image_classifier.py"
      - "examples/pytorch/basics/autoencoder.py"
      - "requirements/pytorch/**"
      - "src/lightning/__about__.py"
      - "src/lightning/__init__.py"
      - "src/lightning/__main__.py"
      - "src/lightning/__setup__.py"
      - "src/lightning/__version__.py"
      - "src/lightning/pytorch/**"
      - "src/pytorch_lightning/*"
      - "tests/tests_pytorch/**"
      - "pyproject.toml"  # includes pytest config
      - "requirements/fabric/**"
      - "src/lightning/fabric/**"
      - "src/lightning_fabric/*"
    exclude:
      - "requirements/*/docs.txt"
      - "*.md"
      - "**/*.md"

jobs:
  - job: testing
    # how long to run the job before automatically cancelling
    timeoutInMinutes: "80"
    # how much time to give 'run always even if cancelled tasks' before stopping them
    cancelTimeoutInMinutes: "2"
    strategy:
      matrix:
        'PyTorch | latest':
          image: "pytorchlightning/pytorch_lightning:base-cuda-py3.10-torch2.0-cuda11.8.0"
          IS_NIGHTLY: "false"
          PACKAGE_NAME: "pytorch"
        'Lightning | latest':
          image: "pytorchlightning/pytorch_lightning:base-cuda-py3.10-torch2.0-cuda11.8.0"
          IS_NIGHTLY: "false"
          PACKAGE_NAME: "lightning"
        'Lightning | nightly':
          image: "pytorchlightning/pytorch_lightning:base-cuda-py3.10-torch2.0-cuda11.8.0"
          IS_NIGHTLY: "true"
          PACKAGE_NAME: "lightning"
    pool: lit-rtx-3090
    variables:
      DEVICES: $( python -c 'print("$(Agent.Name)".split("_")[-1])' )
      FREEZE_REQUIREMENTS: "1"
      PIP_CACHE_DIR: "/var/tmp/pip"
    container:
      image: $(image)
      # default shm size is 64m. Increase it to avoid:
      # 'Error while creating shared memory: unhandled system error, NCCL version 2.7.8'
      options: "--gpus=all --shm-size=2gb -v /var/tmp:/var/tmp"
    workspace:
      clean: all

    steps:
      - bash: |
          echo "##vso[task.setvariable variable=CUDA_VISIBLE_DEVICES]$(DEVICES)"
          cuda_ver=$(python -c "import torch ; print(''.join(map(str, torch.version.cuda.split('.')[:2])))")
          echo "##vso[task.setvariable variable=CUDA_VERSION_MM]$cuda_ver"
          echo "##vso[task.setvariable variable=TORCH_URL]https://download.pytorch.org/whl/cu${cuda_ver}/torch_stable.html"
          scope=$(python -c 'n = "$(PACKAGE_NAME)" ; print(dict(pytorch="pytorch_lightning").get(n, n))')
          echo "##vso[task.setvariable variable=COVERAGE_SOURCE]$scope"
        displayName: 'set env. vars'

      - bash: |
          echo $(DEVICES)
          echo $CUDA_VISIBLE_DEVICES
          echo $CUDA_VERSION_MM
          echo $TORCH_URL
          echo $(IS_NIGHTLY)
          echo $COVERAGE_SOURCE
          whereis nvidia
          nvidia-smi
          which python && which pip
          python --version
          pip --version
          pip list
        displayName: 'Image info & NVIDIA'

      - bash: |
          PYTORCH_VERSION=$(python -c "import torch; print(torch.__version__.split('+')[0])")
          pip install -q wget packaging
          python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py
          for fpath in `ls requirements/**/*.txt`; do \
            python ./adjust-torch-versions.py $fpath ${PYTORCH_VERSION}; \
          done
        # without succeeded() this could run even if the job has already failed
        condition: and(succeeded(), eq(variables.IS_NIGHTLY, 'false'))
        displayName: 'Adjust dependencies'

      - bash: |
          pip install -q -r .actions/requirements.txt
          python .actions/assistant.py requirements_prune_pkgs \
            --packages="[lightning-colossalai,lightning-bagua]" \
            --req_files="[requirements/_integrations/strategies.txt]"
        displayName: 'Prune packages'  # these have installation issues

      - bash: |
          extra=$(python -c "print({'lightning': 'pytorch-'}.get('$(PACKAGE_NAME)', ''))")
          pip install -e ".[${extra}dev]" -r requirements/_integrations/strategies.txt pytest-timeout -U --find-links ${TORCH_URL}
        displayName: 'Install package & dependencies'

      - bash: |
          pip uninstall -y torch torchvision
          pip install torch torchvision -U --pre --no-cache --index-url https://download.pytorch.org/whl/nightly/cu${CUDA_VERSION_MM}
          python -c "from torch import __version__ as ver; assert ver.startswith('2.1.0'), ver"
        # without succeeded() this could run even if the job has already failed
        condition: and(succeeded(), eq(variables.IS_NIGHTLY, 'true'))
        displayName: 'Bump to nightly'

      - bash: pip uninstall -y lightning
        # without succeeded() this could run even if the job has already failed
        condition: and(succeeded(), eq(variables['PACKAGE_NAME'], 'pytorch'))
        # Lightning is a dependency of Habana and other accelerator/integration packages, so when testing PL we need to remove it
        displayName: 'Drop LAI from extensions'

      - bash: pip uninstall -y pytorch-lightning
        # without succeeded() this could run even if the job has already failed
        condition: and(succeeded(), eq(variables['PACKAGE_NAME'], 'lightning'))
        displayName: 'Drop PL for LAI'

      - bash: |
          set -e
          python requirements/collect_env_details.py
          python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu == 2, f'GPU: {mgpu}'"
          python requirements/pytorch/check-avail-extras.py
        displayName: 'Env details'

      - bash: python -m pytest pytorch_lightning
        workingDirectory: src
        # without succeeded() this could run even if the job has already failed
        condition: and(succeeded(), eq(variables['PACKAGE_NAME'], 'pytorch'))
        displayName: 'Testing: PyTorch doctests'

      - bash: |
          python .actions/assistant.py copy_replace_imports --source_dir="./tests/tests_pytorch" \
            --source_import="lightning.fabric,lightning.pytorch" \
            --target_import="lightning_fabric,pytorch_lightning"
          python .actions/assistant.py copy_replace_imports --source_dir="./examples/pytorch/basics" \
            --source_import="lightning.fabric,lightning.pytorch" \
            --target_import="lightning_fabric,pytorch_lightning"
        # without succeeded() this could run even if the job has already failed
        condition: and(succeeded(), eq(variables['PACKAGE_NAME'], 'pytorch'))
        displayName: 'Adjust tests & examples'

      - bash: |
          bash .actions/pull_legacy_checkpoints.sh
          cd tests/legacy
          bash generate_checkpoints.sh
          ls -l checkpoints/
        displayName: 'Get legacy checkpoints'

      - bash: python -m coverage run --source ${COVERAGE_SOURCE} -m pytest -v --durations=50
        workingDirectory: tests/tests_pytorch
        env:
          PL_RUN_CUDA_TESTS: "1"
        displayName: 'Testing: PyTorch standard'
        timeoutInMinutes: "35"

      - bash: bash run_standalone_tests.sh
        workingDirectory: tests/tests_pytorch
        env:
          PL_USE_MOCKED_MNIST: "1"
          PL_RUN_CUDA_TESTS: "1"
          PL_STANDALONE_TESTS_SOURCE: $(COVERAGE_SOURCE)
        displayName: 'Testing: PyTorch standalone tests'
        timeoutInMinutes: "35"

      - bash: bash run_standalone_tasks.sh
        workingDirectory: tests/tests_pytorch
        env:
          PL_USE_MOCKED_MNIST: "1"
          PL_RUN_CUDA_TESTS: "1"
        displayName: 'Testing: PyTorch standalone tasks'
        timeoutInMinutes: "10"

      - bash: |
          python -m coverage report
          python -m coverage xml
          python -m coverage html
          # https://docs.codecov.com/docs/codecov-uploader
          curl -Os https://uploader.codecov.io/latest/linux/codecov
          chmod +x codecov
          ./codecov --token=$(CODECOV_TOKEN) --commit=$(Build.SourceVersion) \
            --flags=gpu,pytest,${COVERAGE_SOURCE} --name="GPU-coverage" --env=linux,azure
          ls -l
        workingDirectory: tests/tests_pytorch
        displayName: 'Statistics'

      - script: |
          set -e
          bash run_pl_examples.sh --trainer.accelerator=gpu --trainer.devices=1
          bash run_pl_examples.sh --trainer.accelerator=gpu --trainer.devices=2 --trainer.strategy=ddp
          bash run_pl_examples.sh --trainer.accelerator=gpu --trainer.devices=2 --trainer.strategy=ddp --trainer.precision=16
        workingDirectory: examples
        env:
          PL_USE_MOCKED_MNIST: "1"
        displayName: 'Testing: PyTorch examples'
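
# ---------------------------------------------------------------------------
# Reproduction sketch (comment only, not executed by the pipeline): the
# "Testing: PyTorch standard" step above boils down to roughly the following
# shell commands, run from the repository root inside the CUDA container
# image, where COVERAGE_SOURCE is "pytorch_lightning" or "lightning"
# depending on the package under test:
#   export PL_RUN_CUDA_TESTS=1
#   cd tests/tests_pytorch
#   python -m coverage run --source ${COVERAGE_SOURCE} -m pytest -v --durations=50
# ---------------------------------------------------------------------------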