#!/bin/bash
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
|
2022-09-01 22:13:12 +00:00
|
|
|
# THIS FILE ASSUMES IT IS RUN INSIDE THE tests/tests_<package> DIRECTORY
|
2021-03-26 17:04:59 +00:00
|
|
|
|
2022-07-27 12:36:22 +00:00
|
|
|
# Batch size for testing: Determines how many standalone test invocations run in parallel
|
2022-07-28 23:33:22 +00:00
|
|
|
# It can be set through the env variable PL_STANDALONE_TESTS_BATCH_SIZE and defaults to 6 if not set
|
|
|
|
test_batch_size="${PL_STANDALONE_TESTS_BATCH_SIZE:-6}"
|
2023-02-20 14:58:44 +00:00
|
|
|
source="${PL_STANDALONE_TESTS_SOURCE:-"lightning"}"
|
2022-07-27 12:36:22 +00:00
|
|
|
|
2021-03-26 17:04:59 +00:00
|
|
|
# this environment variable allows special tests to run
|
2021-11-26 17:13:14 +00:00
|
|
|
export PL_RUN_STANDALONE_TESTS=1
|
2021-03-26 17:04:59 +00:00
|
|
|
# python arguments
|
2023-12-13 19:27:49 +00:00
|
|
|
defaults=" -m coverage run --source ${source} --append -m pytest --no-header -v -s --timeout 120 "
|
2023-02-20 14:58:44 +00:00
|
|
|
echo "Using defaults: ${defaults}"
|
2021-03-26 17:04:59 +00:00
|
|
|
|
2024-06-05 16:32:56 +00:00
|
|
|
# get the testing location as the first argument
|
2023-12-13 19:27:49 +00:00
|
|
|
test_path=$1
|
|
|
|
printf "source path: $test_path\n"
|
2021-11-17 15:46:14 +00:00
|
|
|
|
2023-12-13 19:27:49 +00:00
|
|
|
# collect all tests with parametrization based filtering with PL_RUN_STANDALONE_TESTS
|
2023-12-14 14:43:23 +00:00
|
|
|
standalone_tests=$(python3 -m pytest $test_path -q --collect-only --pythonwarnings ignore)
|
2024-06-05 16:32:56 +00:00
|
|
|
printf "Collected tests: \n $standalone_tests\n"
|
2023-12-13 19:27:49 +00:00
|
|
|
# match only lines with tests
|
2024-06-05 16:32:56 +00:00
|
|
|
parametrizations=$(perl -nle 'print $& while m{\S+::test_\S+}g' <<< "$standalone_tests")
|
2023-12-13 19:27:49 +00:00
|
|
|
# convert the list to be array
|
2021-11-17 15:46:14 +00:00
|
|
|
parametrizations_arr=($parametrizations)
|
2021-03-26 17:04:59 +00:00
|
|
|
report=''
|
2022-07-18 12:10:35 +00:00
|
|
|
|
|
|
|
rm -f standalone_test_output.txt # in case it exists, remove it
|
2023-12-21 02:22:25 +00:00
|
|
|
rm -f testnames.txt
|
|
|
|
|
2022-07-18 12:10:35 +00:00
|
|
|
function show_batched_output {
|
|
|
|
if [ -f standalone_test_output.txt ]; then # if exists
|
|
|
|
cat standalone_test_output.txt
|
2022-11-05 03:29:38 +00:00
|
|
|
# heuristic: stop if there's mentions of errors. this can prevent false negatives when only some of the ranks fail
|
2024-06-05 16:32:56 +00:00
|
|
|
if perl -nle 'print if /error|(?<!(?-i)on_)exception|traceback|(?<!(?-i)x)failed/i' standalone_test_output.txt | grep -qv -f testnames.txt; then
|
2022-11-05 03:29:38 +00:00
|
|
|
echo "Potential error! Stopping."
|
|
|
|
rm standalone_test_output.txt
|
|
|
|
exit 1
|
|
|
|
fi
|
2022-07-18 12:10:35 +00:00
|
|
|
rm standalone_test_output.txt
|
|
|
|
fi
|
|
|
|
}
|
|
|
|
trap show_batched_output EXIT # show the output on exit
|
2021-03-26 17:04:59 +00:00
|
|
|
|
2023-12-13 19:27:49 +00:00
|
|
|
# remove the "tests/tests_pytorch/" path suffixes
|
|
|
|
path_prefix=$(basename "$(dirname "$(pwd)")")/$(basename "$(pwd)")"/" # https://stackoverflow.com/a/8223345
|
2023-12-21 02:22:25 +00:00
|
|
|
|
2021-11-17 15:46:14 +00:00
|
|
|
for i in "${!parametrizations_arr[@]}"; do
|
2023-12-13 19:27:49 +00:00
|
|
|
parametrization=${parametrizations_arr[$i]//$path_prefix/}
|
2023-04-28 09:13:53 +00:00
|
|
|
prefix="$((i+1))/${#parametrizations_arr[@]}"
|
2021-03-26 17:04:59 +00:00
|
|
|
|
2023-12-13 19:27:49 +00:00
|
|
|
echo "$prefix: Running $parametrization"
|
2023-12-21 02:22:25 +00:00
|
|
|
echo $parametrization | sed 's/\[[^][]*\]//g' >> testnames.txt
|
2023-11-21 22:11:00 +00:00
|
|
|
|
2023-12-13 19:27:49 +00:00
|
|
|
# fix the port to avoid race condition when batched distributed tests select the port randomly
|
|
|
|
export MASTER_PORT=$((29500 + $i % $test_batch_size))
|
2023-11-21 22:11:00 +00:00
|
|
|
|
2023-12-13 19:27:49 +00:00
|
|
|
# execute the test in the background
|
|
|
|
# redirect to a log file that buffers test output. since the tests will run in the background, we cannot let them
|
|
|
|
# output to std{out,err} because the outputs would be garbled together
|
2023-12-14 14:43:23 +00:00
|
|
|
python3 ${defaults} "$parametrization" &>> standalone_test_output.txt &
|
2023-12-13 19:27:49 +00:00
|
|
|
# save the PID in an array
|
|
|
|
pids[${i}]=$!
|
|
|
|
# add row to the final report
|
|
|
|
report+="Ran\t$parametrization\n"
|
2021-03-26 17:04:59 +00:00
|
|
|
|
2022-07-18 12:10:35 +00:00
|
|
|
if ((($i + 1) % $test_batch_size == 0)); then
|
|
|
|
# wait for running tests
|
|
|
|
for pid in ${pids[*]}; do wait $pid; done
|
|
|
|
unset pids # empty the array
|
|
|
|
show_batched_output
|
|
|
|
fi
|
2021-03-26 17:04:59 +00:00
|
|
|
done
|
2022-07-18 12:10:35 +00:00
|
|
|
# wait for leftover tests
|
|
|
|
for pid in ${pids[*]}; do wait $pid; done
|
|
|
|
show_batched_output
|
2021-07-14 11:25:36 +00:00
|
|
|
|
2021-03-26 17:04:59 +00:00
|
|
|
# echo test report
|
|
|
|
printf '=%.s' {1..80}
|
|
|
|
printf "\n$report"
|
|
|
|
printf '=%.s' {1..80}
|
|
|
|
printf '\n'
|