diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 17029d2817..0bb4f5cfff 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -82,7 +82,7 @@ jobs:
   - bash: |
       python -m coverage report
       python -m coverage xml
-      codecov --token $(CODECOV_TOKEN) --flags=gpu,pytest --name="GPU-coverage" --env=linux,azure
+      codecov --token=$(CODECOV_TOKEN) --flags=gpu,pytest --name="GPU-coverage" --env=linux,azure
     displayName: 'Statistics'
 
   - script: |
diff --git a/tests/accelerators/legacy/test_ddp.py b/tests/accelerators/legacy/test_ddp.py
index 0e7d6948c1..48cef6d595 100644
--- a/tests/accelerators/legacy/test_ddp.py
+++ b/tests/accelerators/legacy/test_ddp.py
@@ -72,30 +72,6 @@ def test_multi_gpu_model_ddp_fit_test(tmpdir, cli_args):
     assert out['test_acc'] > 0.90
 
 
-# START: test_cli ddp test
-@pytest.mark.skipif(os.getenv("PL_IN_LAUNCHER", '0') == '1', reason="test runs only in DDPLauncher")
-def internal_test_cli(tmpdir, args=None):
-    """
-    This test verify we can call function using test_cli name
-    """
-
-    return 1
-
-
-@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
-def test_cli(tmpdir):
-    DDPLauncher.run_from_cmd_line("--max_epochs 1 --gpus 2 --accelerator ddp", internal_test_cli, tmpdir)
-    # load the results of the script
-    result_path = os.path.join(tmpdir, 'ddp.result')
-    result = torch.load(result_path)
-    # verify the file wrote the expected outputs
-    assert result['status'] == 'complete'
-    assert str(result['result']) == '1'
-
-
-# END: test_cli ddp test
-
-
 @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
 @DDPLauncher.run(
     "--max_epochs [max_epochs] --gpus 2 --accelerator [accelerator]",