name: Test PyTorch slow

# see: https://help.github.com/en/actions/reference/events-that-trigger-workflows
on:  # Trigger the workflow on push or pull request, but only for the master branch
  push:
    branches: [master, "release/*"]
  pull_request:
    branches: [master, "release/*"]
    types: [opened, reopened, ready_for_review, synchronize]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
  cancel-in-progress: ${{ ! (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release/')) }}

jobs:
  slow:
    runs-on: ${{ matrix.os }}
    if: github.event.pull_request.draft == false
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-20.04, windows-2022, macOS-11]
        # same config as '.azure-pipelines/gpu-tests.yml'
        python-version: ["3.7"]
        pytorch-version: ["1.11"]
    timeout-minutes: 20
    steps:
      - uses: actions/checkout@v2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v28

      - name: Decide if the test should be skipped
        id: skip
        shell: bash -l {0}
        run: |
          # run the slow suite only when PyTorch-related files changed
          FILTER='src/pytorch_lightning|requirements/pytorch|tests/tests_pytorch|examples/pl_*'
          echo "${{ steps.changed-files.outputs.all_changed_files }}" | tr " " "\n" > changed_files.txt
          MATCHES=$(cat changed_files.txt | grep -E $FILTER)
          echo $MATCHES
          if [ -z "$MATCHES" ]; then
              echo "Skip"
              echo "::set-output name=continue::0"
          else
              echo "Continue"
              echo "::set-output name=continue::1"
          fi

      - uses: actions/setup-python@v4
        if: ${{ (steps.skip.outputs.continue == '1') }}
        with:
          python-version: ${{ matrix.python-version }}

      - name: Reset caching
        if: ${{ (steps.skip.outputs.continue == '1') }}
        # TIME_PERIOD changes every two days, which rotates the pip cache key below
        run: python -c "import time; days = time.time() / 60 / 60 / 24; print(f'TIME_PERIOD=d{int(days / 2) * 2}')" >> $GITHUB_ENV

      - name: Get pip cache
        if: ${{ (steps.skip.outputs.continue == '1') }}
        id: pip-cache
        run: |
          python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)"

      - name: Cache pip
        if: ${{ (steps.skip.outputs.continue == '1') }}
        uses: actions/cache@v3
        with:
          path: ${{ steps.pip-cache.outputs.dir }}
          key: ${{ runner.os }}-pip-td${{ env.TIME_PERIOD }}-py${{ matrix.python-version }}-${{ hashFiles('requirements/pytorch/base.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-td${{ env.TIME_PERIOD }}-py${{ matrix.python-version }}-

      - name: Install dependencies
        if: ${{ (steps.skip.outputs.continue == '1') }}
        env:
          PACKAGE_NAME: pytorch
          FREEZE_REQUIREMENTS: 1
        run: |
          # adjust versions according to the installed Torch version
          python ./requirements/pytorch/adjust-versions.py requirements.txt ${{ matrix.pytorch-version }}
          pip install -e .[test] --find-links https://download.pytorch.org/whl/cpu/torch_stable.html --upgrade
          pip list
        shell: bash

      - name: Testing PyTorch
        if: ${{ (steps.skip.outputs.continue == '1') }}
        working-directory: tests/tests_pytorch
        run: coverage run --source pytorch_lightning -m pytest -v --junitxml=results-${{ runner.os }}-py${{ matrix.python-version }}.xml
        env:
          PL_RUN_SLOW_TESTS: 1

      - name: Upload pytest test results
        if: ${{ (failure()) && (steps.skip.outputs.continue == '1') }}
        uses: actions/upload-artifact@v3
        with:
          name: unittest-results-${{ runner.os }}-py${{ matrix.python-version }}
          path: tests/tests_pytorch/results-${{ runner.os }}-py${{ matrix.python-version }}.xml

      - name: Statistics
        if: ${{ (success()) && (steps.skip.outputs.continue == '1') }}
        working-directory: tests/tests_pytorch
        run: |
          coverage report
          coverage xml

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        if: ${{ (success()) && (steps.skip.outputs.continue == '1') }}
        # see: https://github.com/actions/toolkit/issues/399
        continue-on-error: true
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          file: tests/tests_pytorch/coverage.xml
          flags: cpu,pytest,torch${{ matrix.pytorch-version }}
          name: CPU-coverage
          fail_ci_if_error: false
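
# Local reproduction sketch (an assumption for illustration, not part of this workflow);
# it mirrors the "Install dependencies" and "Testing PyTorch" steps above:
#   pip install -e .[test] --find-links https://download.pytorch.org/whl/cpu/torch_stable.html --upgrade
#   cd tests/tests_pytorch && PL_RUN_SLOW_TESTS=1 coverage run --source pytorch_lightning -m pytest -v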