# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# https://docs.drone.io/pipeline/docker/examples/languages/python/#python-example

kind: pipeline
type: docker
name: torch-GPU

steps:
- name: testing
  image: pytorchlightning/pytorch_lightning:cuda-extras-py3.7-torch1.5

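  # The HOROVOD_* variables below configure Horovod's from-source pip build, in case it is
  # (re)built during the installs further down: NCCL for GPU allreduce/broadcast, PyTorch
  # ops and the Gloo controller enabled, MPI/TensorFlow/MXNet support skipped.
  # SLURM_LOCALID is presumably set so local-rank detection works outside a real SLURM
  # allocation, and MKL_THREADING_LAYER=GNU avoids MKL vs. libgomp threading conflicts.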
  environment:
    SLURM_LOCALID: 0
    CODECOV_TOKEN:
      from_secret: codecov_token
    MKL_THREADING_LAYER: GNU
    HOROVOD_GPU_ALLREDUCE: NCCL
    HOROVOD_GPU_BROADCAST: NCCL
    HOROVOD_WITH_PYTORCH: 1
    HOROVOD_WITHOUT_TENSORFLOW: 1
    HOROVOD_WITHOUT_MXNET: 1
    HOROVOD_WITH_GLOO: 1
    HOROVOD_WITHOUT_MPI: 1

  #volumes:
  #  # Mount pip cache from host
  #  - name: pip_cache
  #    path: /opt/conda/lib/python3.7/site-packages

  commands:
    - export PATH="$PATH:/root/.local/bin"
    - python --version
    - pip install pip -U
    - pip --version
    - nvidia-smi
    #- bash ./requirements/install_AMP.sh
    - apt-get update && apt-get install -y cmake
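    # CMake above is presumably needed in case Horovod is built from source by the installs
    # below; --user targets the user site and --upgrade-strategy only-if-needed keeps
    # packages already shipped in the CUDA image unless a requirement demands a newer one.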
    - pip install -r ./requirements/base.txt --user -q --upgrade-strategy only-if-needed
    - pip install -r ./requirements/devel.txt --user -q --upgrade-strategy only-if-needed
    #- pip install -r ./requirements/docs.txt --user -q
    - pip install -r ./requirements/examples.txt --user -q --upgrade-strategy only-if-needed
    - pip list
    - python -c "import torch ; print(' & '.join([torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())]) if torch.cuda.is_available() else 'only CPU')"
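    # Unit tests run under coverage first; benchmarks and pl_examples follow as a smoke
    # test that bails out after two failures (--maxfail=2).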
    - coverage run --source pytorch_lightning -m py.test pytorch_lightning tests -v --durations=25 # --flake8
    - python -m py.test benchmarks pl_examples -v --maxfail=2 --durations=0 # --flake8
    #- cd docs; make doctest; make coverage
    - coverage report
    # see: https://docs.codecov.io/docs/merging-reports
    - codecov --token $CODECOV_TOKEN --flags=gpu,pytest --name="GPU-coverage" --env=linux --build $DRONE_BUILD_NUMBER --commit $DRONE_COMMIT
    # --build $DRONE_BUILD_NUMBER --branch $DRONE_BRANCH --commit $DRONE_COMMIT --tag $DRONE_TAG --pr $DRONE_PULL_REQUEST
    # - codecov --token $CODECOV_TOKEN --flags=gpu,pytest --build $DRONE_BUILD_NUMBER
    - python tests/collect_env_details.py

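# Run this GPU pipeline only for pushes to master and for pull requests.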
trigger:
  branch:
    - master
  event:
    include:
    - push
    - pull_request

#volumes:
#  - name: pip_cache
#    host:
#      path: /tmp/cache/drone/pip
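# Note: if this host pip cache is re-enabled, the matching step-level mount commented out
# under the testing step presumably needs enabling too, since steps only see volumes they
# explicitly reference.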