# Python package
# Create and test a Python package on multiple Python versions.
# Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/python

trigger:
  tags:
    include: ["*"]
  branches:
    include:
      - "master"
      - "release/*"
      - "refs/tags/*"

pr:
  branches:
    include:
      - "master"
      - "release/*"
  paths:
    include:
      - ".actions/*"
      - ".azure/gpu-tests-fabric.yml"
      - "examples/fabric/**"
      - "examples/run_fabric_examples.sh"
      - "tests/tests_fabric/run_standalone_*.sh"
      - "tests/tests_pytorch/run_standalone_tests.sh" # used by fabric through a symlink
      - "requirements/fabric/**"
      - "src/lightning/__about__.py"
      - "src/lightning/__init__.py"
      - "src/lightning/__main__.py"
      - "src/lightning/__setup__.py"
      - "src/lightning/__version__.py"
      - "src/lightning/fabric/**"
      - "src/lightning_fabric/*"
      - "tests/tests_fabric/**"
      - "pyproject.toml" # includes pytest config
    exclude:
      - "requirements/*/docs.txt"
      - "*.md"
      - "**/*.md"

jobs:
  - job: testing
    # how long to run the job before automatically cancelling
    timeoutInMinutes: "20"
    # how much time to give 'run always even if cancelled tasks' before stopping them
    cancelTimeoutInMinutes: "2"
    pool: lit-rtx-3090
    variables:
      DEVICES: $( python -c 'print("$(Agent.Name)".split("_")[-1])' )
      FREEZE_REQUIREMENTS: "1"
      PIP_CACHE_DIR: "/var/tmp/pip"
    container:
      image: $(image)
      # default shm size is 64m. Increase it to avoid:
      # 'Error while creating shared memory: unhandled system error, NCCL version 2.7.8'
      options: "--gpus=all --shm-size=2gb -v /var/tmp:/var/tmp"
    strategy:
      matrix:
        "Fabric | latest":
          image: "pytorchlightning/pytorch_lightning:base-cuda-py3.10-torch2.1-cuda12.1.0"
          PACKAGE_NAME: "fabric"
        "Lightning | latest":
          image: "pytorchlightning/pytorch_lightning:base-cuda-py3.10-torch2.1-cuda12.1.0"
          PACKAGE_NAME: "lightning"
    workspace:
      clean: all
    steps:
      - bash: |
          echo "##vso[task.setvariable variable=CUDA_VISIBLE_DEVICES]$(DEVICES)"
          cuda_ver=$(python -c "import torch ; print(''.join(map(str, torch.version.cuda.split('.')[:2])))")
          echo "##vso[task.setvariable variable=CUDA_VERSION_MM]$cuda_ver"
          echo "##vso[task.setvariable variable=TORCH_URL]https://download.pytorch.org/whl/cu${cuda_ver}/torch_stable.html"
          scope=$(python -c 'n = "$(PACKAGE_NAME)" ; print(dict(fabric="lightning_fabric").get(n, n))')
          echo "##vso[task.setvariable variable=COVERAGE_SOURCE]$scope"
        displayName: "set env. vars"
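
      # The step below prints the variables resolved above together with NVIDIA
      # driver and Python tooling info, so environment issues can be diagnosed
      # straight from the job log.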
      - bash: |
          echo $(DEVICES)
          echo $CUDA_VISIBLE_DEVICES
          echo $CUDA_VERSION_MM
          echo $TORCH_URL
          echo $COVERAGE_SOURCE
          whereis nvidia
          nvidia-smi
          which python && which pip
          python --version
          pip --version
          pip list
        displayName: "Image info & NVIDIA"

      - bash: |
          PYTORCH_VERSION=$(python -c "import torch; print(torch.__version__.split('+')[0])")
          pip install -q wget packaging
          python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py
          for fpath in `ls requirements/**/*.txt`; do \
            python ./adjust-torch-versions.py $fpath ${PYTORCH_VERSION}; \
          done
        displayName: "Adjust dependencies"

      - bash: |
          extra=$(python -c "print({'lightning': 'fabric-'}.get('$(PACKAGE_NAME)', ''))")
          pip install -e ".[${extra}dev]" pytest-timeout -U --find-links ${TORCH_URL}
        displayName: "Install package & dependencies"

      - bash: |
          set -e
          python requirements/collect_env_details.py
          python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu == 2, f'GPU: {mgpu}'"
        displayName: "Env details"

      - bash: python -m pytest lightning_fabric
        workingDirectory: src
        # without succeeded() this could run even if the job has already failed
        condition: and(succeeded(), eq(variables['PACKAGE_NAME'], 'fabric'))
        displayName: "Testing: Fabric doctests"

      - bash: |
          pip install -q -r .actions/requirements.txt
          python .actions/assistant.py copy_replace_imports --source_dir="./tests/tests_fabric" \
            --source_import="lightning.fabric" \
            --target_import="lightning_fabric"
          python .actions/assistant.py copy_replace_imports --source_dir="./examples/fabric" \
            --source_import="lightning.fabric" \
            --target_import="lightning_fabric"
        # without succeeded() this could run even if the job has already failed
        condition: and(succeeded(), eq(variables['PACKAGE_NAME'], 'fabric'))
        displayName: "Adjust tests & examples"

      - bash: python -m coverage run --source ${COVERAGE_SOURCE} -m pytest -v --durations=50
        workingDirectory: tests/tests_fabric
        env:
          PL_RUN_CUDA_TESTS: "1"
        displayName: "Testing: fabric standard"
        timeoutInMinutes: "10"

      - bash: bash run_standalone_tests.sh
        workingDirectory: tests/tests_fabric
        env:
          PL_RUN_CUDA_TESTS: "1"
          PL_STANDALONE_TESTS_SOURCE: $(COVERAGE_SOURCE)
        displayName: "Testing: fabric standalone tests"
        timeoutInMinutes: "10"

      - bash: |
          python -m coverage report
          python -m coverage xml
          python -m coverage html
          # https://docs.codecov.com/docs/codecov-uploader
          curl -Os https://uploader.codecov.io/latest/linux/codecov
          chmod +x codecov
          ./codecov --token=$(CODECOV_TOKEN) --commit=$(Build.SourceVersion) \
            --flags=gpu,pytest,${COVERAGE_SOURCE} --name="GPU-coverage" --env=linux,azure
          ls -l
        workingDirectory: tests/tests_fabric
        displayName: "Statistics"

      - script: |
          set -e
          bash run_fabric_examples.sh --accelerator=cuda --devices=1
          bash run_fabric_examples.sh --accelerator=cuda --devices=2 --strategy ddp
        workingDirectory: examples
        displayName: "Testing: fabric examples"
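
# Note: the DEVICES variable above assumes agents in the lit-rtx-3090 pool are
# named with a trailing "_<gpu-ids>" suffix, e.g. an agent name ending in "_0,1"
# would yield CUDA_VISIBLE_DEVICES="0,1"; the "Env details" step then asserts
# that exactly two GPUs are visible to torch.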