Drop nvtx test (#19154)

Carlos Mocholí 2023-12-14 15:43:23 +01:00 committed by GitHub
parent df869c95d0
commit 11bac946ff
3 changed files with 2 additions and 31 deletions


@@ -31,7 +31,7 @@ test_path=$1
printf "source path: $test_path\n"
# collect all tests with parametrization based filtering with PL_RUN_STANDALONE_TESTS
-standalone_tests=$(python -m pytest $test_path -q --collect-only --pythonwarnings ignore)
+standalone_tests=$(python3 -m pytest $test_path -q --collect-only --pythonwarnings ignore)
printf "Collected tests: \n $standalone_tests"
# match only lines with tests
parametrizations=$(grep -oP '\S+::test_\S+' <<< "$standalone_tests")
@@ -69,7 +69,7 @@ for i in "${!parametrizations_arr[@]}"; do
# execute the test in the background
# redirect to a log file that buffers test output. since the tests will run in the background, we cannot let them
# output to std{out,err} because the outputs would be garbled together
-python ${defaults} "$parametrization" &>> standalone_test_output.txt &
+python3 ${defaults} "$parametrization" &>> standalone_test_output.txt &
# save the PID in an array
pids[${i}]=$!
# add row to the final report
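
For context, and not part of the diff: the two hunks above first collect the standalone test ids with pytest's --collect-only and filter them down to parametrized node ids, then launch each one in the background with its output buffered into a single log file. A rough Python sketch of that flow follows; the "tests/" path and the pytest arguments standing in for ${defaults} are placeholders, not the script's actual values.

# Illustrative sketch only, not part of this commit: roughly what the shell
# pipeline above does. "tests/" and the pytest arguments below are placeholders.
import os
import re
import subprocess

os.environ["PL_RUN_STANDALONE_TESTS"] = "1"

# collect the test ids without running anything (mirrors --collect-only above)
collected = subprocess.run(
    ["python3", "-m", "pytest", "tests/", "-q", "--collect-only", "--pythonwarnings", "ignore"],
    capture_output=True,
    text=True,
).stdout

# keep only lines that look like test node ids, e.g. "profilers/test_profiler.py::test_xy[param0]"
parametrizations = re.findall(r"\S+::test_\S+", collected)

# run each test in the background, buffering all output into one log file
with open("standalone_test_output.txt", "ab") as log:
    procs = [
        subprocess.Popen(["python3", "-m", "pytest", "-v", p], stdout=log, stderr=subprocess.STDOUT)
        for p in parametrizations
    ]
    for proc in procs:
        proc.wait()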


@@ -471,24 +471,6 @@ def test_pytorch_profiler_multiple_loggers(tmpdir):
assert look_for_trace(tmpdir / "lightning_logs" / "version_0")
-# Todo: this test has not been running as all our CI GPU runners have higher capacity
-# @RunIf(min_cuda_gpus=1, standalone=True)
-# @pytest.mark.skipif(torch.cuda.get_device_capability()[0] >= 8)
-# def test_pytorch_profiler_nested_emit_nvtx():
-#     """This test check emit_nvtx is correctly supported."""
-#     profiler = PyTorchProfiler(use_cuda=True, emit_nvtx=True)
-#     model = BoringModel()
-#     trainer = Trainer(
-#         fast_dev_run=True,
-#         profiler=profiler,
-#         accelerator="gpu",
-#         devices=1,
-#         enable_progress_bar=False,
-#         enable_model_summary=False,
-#     )
-#     trainer.fit(model)
def test_register_record_function(tmpdir):
use_cuda = torch.cuda.is_available()
pytorch_profiler = PyTorchProfiler(
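
For context on what coverage is being dropped, and not part of the commit: at the PyTorch level, the deleted test exercised NVTX range emission during autograd, which is only useful under an external profiler such as nvprof or Nsight. A minimal standalone sketch using plain PyTorch APIs rather than Lightning's PyTorchProfiler:

# Illustrative only, not from this commit: the underlying mechanism the deleted
# test covered. emit_nvtx() wraps every autograd op in an NVTX range so an
# external profiler (nvprof/Nsight) can correlate the timeline with Python code.
import torch

if torch.cuda.is_available():
    model = torch.nn.Linear(8, 8).cuda()
    x = torch.randn(4, 8, device="cuda")
    # profile() starts/stops the CUDA profiler, pairing with "nvprof --profile-from-start off"
    with torch.cuda.profiler.profile():
        with torch.autograd.profiler.emit_nvtx():
            model(x).sum().backward()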


@@ -18,17 +18,6 @@ set -e
# this environment variable allows special tests to run
export PL_RUN_STANDALONE_TESTS=1
-#can_run_nvprof=$(python -c "import torch; print(torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8)")
-#if [[ $can_run_nvprof == "True" ]]; then
-#  echo "Running profilers/test_profiler.py::test_pytorch_profiler_nested_emit_nvtx"
-#  nvprof --profile-from-start off \
-#    -o trace_name.prof \
-#    -- python -m coverage run \
-#    --source lightning.pytorch \
-#    --append -m pytest \
-#    --no-header profilers/test_profiler.py::test_pytorch_profiler_nested_emit_nvtx
-#fi
# test that a user can manually launch individual processes
echo "Running manual ddp launch test"
export PYTHONPATH="${PYTHONPATH}:$(pwd)"
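
Not part of the commit, but the rationale in one place: nvprof only supports GPUs with compute capability below 8 (pre-Ampere), and the CI GPU runners all have higher capability, so the gate the removed block relied on could never pass. A sketch of that gate in Python:

# Illustrative sketch of the removed can_run_nvprof gate: True only when a CUDA
# device is present and older than Ampere (compute capability < 8), which is
# exactly the hardware the CI runners no longer provide.
import torch

def can_run_nvprof() -> bool:
    return torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8

if __name__ == "__main__":
    print(can_run_nvprof())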