author    Paul Moore <paul@paul-moore.com>    2021-01-04 21:25:19 -0500
committer Paul Moore <paul@paul-moore.com>    2021-01-14 09:54:41 -0500
commit    2380f5788c692796f75e464c61aa877e5c4eb882 (patch)
tree      20657031fdbc1b1be76091f5f91cb6dc241c7a87 /tests
parent    2ddf36ca5869dc886a93bb5ad8b7039b5156a03a (diff)
tests: add basic support for running tests in parallel
In the beginning it didn't matter that we were running the regression
tests serially, but as we are now running 16k+ tests the run time is
getting rather long. As there is no good reason why we can't run these
tests in parallel, let's add some basic support to do so.

This patch adds support for running multiple test jobs at once using
the '-j <NUM>' flag, similar to the "make" command. If the number of
jobs specified is invalid or zero, it is set to the number of CPUs
present on the system. If the '-j <NUM>' flag is not specified, the
tests are executed serially, as they are today.

If the '-l <LOG>' option is specified, the test run reverts to serial
execution regardless of the rest of the command line, in order to
preserve the log output. While the normal console output is preserved
regardless of the number of jobs, the logfile output is handled
differently, and this restriction was the easiest solution. We should
consider removing the '-l <LOG>' option at some point, since output
capture and redirection is likely best handled by the shell anyway.

As far as the performance improvements are concerned, the results
speak for themselves: on my eight-core laptop the runtime drops from
~14 minutes to ~4 minutes (!).

* Existing code, single threaded (14 minutes, 14 seconds)

% time -- ./regression -m c -m python
=============== Tue Jan 5 06:11:52 PM EST 2021 ===============
Regression Test Report ("regression -m c -m python")
 batch name: 01-sim-allow
 test mode: c
 test type: bpf-sim
Test 01-sim-allow%%001-00001 result: SUCCESS
...
Test 58-live-tsync_notify%%001-00001 result: SKIPPED (must specify live tests)
Regression Test Summary
 tests run: 16412
 tests skipped: 130
 tests passed: 16412
 tests failed: 0
 tests errored: 0
============================================================
real 854.37
user 693.87
sys 269.25

* Patched code, 8 jobs (4 minutes, 7 seconds)

% time -- ./regression -j 8 -m c -m python
=============== Tue Jan 5 06:27:56 PM EST 2021 ===============
Regression Test Report ("regression -j 8 -m c -m python")
 batch name: 01-sim-allow
 test mode: c
 test type: bpf-sim
Test 01-sim-allow%%001-00001 result: SUCCESS
...
Test 58-live-tsync_notify%%001-00001 result: SKIPPED (must specify live tests)
Regression Test Summary
 tests run: 16412
 tests skipped: 130
 tests passed: 16412
 tests failed: 0
 tests errored: 0
============================================================
real 246.96
user 966.08
sys 251.27

Reviewed-by: Tom Hromatka <tom.hromatka@oracle.com>
Signed-off-by: Paul Moore <paul@paul-moore.com>
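For reference, the option handling in the patch below composes like this (hypothetical invocations; 'nproc' is used here only as a shorthand for the script's /proc/cpuinfo CPU count):

    # any job count below 1 falls back to the number of CPUs present,
    # so on this machine these two invocations are equivalent:
    % ./regression -j 0 -m c -m python
    % ./regression -j $(nproc) -m c -m python

    # specifying '-l <LOG>' forces the run back to a single job so the
    # logfile output stays ordered:
    % ./regression -j 8 -l regression.log -m c -m python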
Diffstat (limited to 'tests')
-rwxr-xr-x  tests/regression  180
1 file changed, 132 insertions, 48 deletions
diff --git a/tests/regression b/tests/regression
index 1496294..4f3f2ac 100755
--- a/tests/regression
+++ b/tests/regression
@@ -89,12 +89,13 @@ function verify_deps() {
#
function usage() {
cat << EOF
-usage: regression [-h] [-v] [-m MODE] [-a] [-b BATCH_NAME] [-l <LOG>]
- [-s SINGLE_TEST] [-t <TEMP_DIR>] [-T <TEST_TYPE>]
+usage: regression [-h] [-v] [-j JOBS] [-m MODE] [-a] [-b BATCH_NAME]
+ [-l <LOG>] [-s SINGLE_TEST] [-t <TEMP_DIR>] [-T <TEST_TYPE>]
libseccomp regression test automation script
optional arguments:
-h show this help message and exit
+ -j JOBS run up to JOBS test jobs in parallel
-m MODE specifies the test mode [c (default), python]
can also be set via LIBSECCOMP_TSTCFG_MODE_LIST env variable
-a specifies all tests are to be run
@@ -881,12 +882,114 @@ function run_test() {
}
#
+# Run the requested test batch
+#
+# Arguments:
+# 1 Batch name
+#
+function run_test_batch() {
+ local testnum=1
+ local batch_name=$1
+
+ # open temporary file
+ if [[ -n $tmpdir ]]; then
+ tmpfile=$(mktemp -t regression_XXXXXX --tmpdir=$tmpdir)
+ else
+ tmpfile=$(mktemp -t regression_XXXXXX)
+ fi
+
+ # reset the stats
+ stats_all=0
+ stats_skipped=0
+ stats_success=0
+ stats_failure=0
+ stats_error=0
+
+ # print a test batch header
+ echo " batch name: $batch_name" >&$logfd
+
+ # loop through each line and run the requested tests
+ while read line; do
+ # strip whitespace, comments, and blank lines
+ line=$(echo "$line" | \
+ sed -e 's/^[\t ]*//;s/[\t ]*$//;' | \
+ sed -e '/^[#].*$/d;/^$/d')
+ if [[ -z $line ]]; then
+ continue
+ fi
+
+ if [[ $line =~ ^"test type": ]]; then
+ test_type=$(echo "$line" | \
+ sed -e 's/^test type: //;')
+ # print a test mode and type header
+ echo " test mode: $mode" >&$logfd
+ echo " test type: $test_type" >&$logfd
+ continue
+ fi
+
+ if [[ ${single_list[@]} ]]; then
+ for i in ${single_list[@]}; do
+ if [ $i -eq $testnum ]; then
+ # we're running a single test
+ run_test "$batch_name" \
+ $testnum "$line" \
+ "$test_type"
+ fi
+ done
+ else
+ # we're running a test from a batch
+ run_test "$batch_name" \
+ $testnum "$line" "$test_type"
+ fi
+ testnum=$(($testnum+1))
+ done < "$file"
+
+
+ # dump our stats
+ local stats=$batch_name.$mode.stats
+ > $stats
+ echo -n "$stats_all $stats_skipped $stats_success " >> $stats
+ echo -n "$stats_failure $stats_error " >> $stats
+ echo "" >> $stats
+
+ # cleanup the temporary file we created
+ rm -f $tmpfile
+}
+
+#
+# Tail a test batch log until the batch job exits, then collect its stats
+#
+# Arguments:
+# 1 Log file
+# 2 PID to watch
+#
+function tail_log() {
+ local log=$1
+ local pid=$2
+
+ # dump the output
+ tail -n +0 --pid=$pid -f $log
+
+ # accumulate the stats
+ local stats=$(echo $log | sed 's/\.log$/.stats/')
+ stats_all=$(( $stats_all + $(awk '{ print $1 }' $stats) ))
+ stats_skipped=$(( $stats_skipped + $(awk '{ print $2 }' $stats) ))
+ stats_success=$(( $stats_success + $(awk '{ print $3 }' $stats) ))
+ stats_failure=$(( $stats_failure + $(awk '{ print $4 }' $stats) ))
+ stats_error=$(( $stats_error + $(awk '{ print $5 }' $stats) ))
+}
+
+#
# Run the requested tests
#
function run_tests() {
+ local job_cnt=0
+ local tail_cnt=0
+ local -a job_pids
+ local -a job_logs
+
# loop through all test files
for file in $basedir/*.tests; do
- local testnum=1
local batch_requested=false
local batch_name=""
@@ -906,44 +1009,22 @@ function run_tests() {
fi
fi
- # print a test batch header
- echo " batch name: $batch_name" >&$logfd
+ # run the test batch
+ run_test_batch $batch_name >& $batch_name.$mode.log &
+ job_pids[job_cnt]=$!
+ job_logs[job_cnt]=$batch_name.$mode.log
+ job_cnt=$(( $job_cnt + 1 ))
- # loop through each line and run the requested tests
- while read line; do
- # strip whitespace, comments, and blank lines
- line=$(echo "$line" | \
- sed -e 's/^[\t ]*//;s/[\t ]*$//;' | \
- sed -e '/^[#].*$/d;/^$/d')
- if [[ -z $line ]]; then
- continue
- fi
-
- if [[ $line =~ ^"test type": ]]; then
- test_type=$(echo "$line" | \
- sed -e 's/^test type: //;')
- # print a test mode and type header
- echo " test mode: $mode" >&$logfd
- echo " test type: $test_type" >&$logfd
- continue
- fi
+ # output the next log if the job queue is full
+ if [[ $(jobs | wc -l) -ge $jobs ]]; then
+ tail_log ${job_logs[$tail_cnt]} ${job_pids[$tail_cnt]}
+ tail_cnt=$(( $tail_cnt + 1 ))
+ fi
+ done
- if [[ ${single_list[@]} ]]; then
- for i in ${single_list[@]}; do
- if [ $i -eq $testnum ]; then
- # we're running a single test
- run_test "$batch_name" \
- $testnum "$line" \
- "$test_type"
- fi
- done
- else
- # we're running a test from a batch
- run_test "$batch_name" \
- $testnum "$line" "$test_type"
- fi
- testnum=$(($testnum+1))
- done < "$file"
+ # output any leftovers
+ for i in $(seq $tail_cnt $(( $job_cnt - 1 ))); do
+ tail_log ${job_logs[$i]} ${job_pids[$i]}
done
}
@@ -970,6 +1051,7 @@ tmpfile=""
tmpdir=""
type=
verbose=
+jobs=1
stats_all=0
stats_skipped=0
stats_success=0
@@ -983,7 +1065,7 @@ basedir=$(dirname $0)
pid=$$
# parse the command line
-while getopts "ab:gl:m:s:t:T:vh" opt; do
+while getopts "ab:gj:l:m:s:t:T:vh" opt; do
case $opt in
a)
runall=1
@@ -992,6 +1074,13 @@ while getopts "ab:gl:m:s:t:T:vh" opt; do
batch_list[batch_count]="$OPTARG"
batch_count=$(($batch_count+1))
;;
+ j)
+ if [[ $OPTARG -lt 1 ]]; then
+ jobs=$(cat /proc/cpuinfo | grep "^processor" | wc -l)
+ else
+ jobs=$OPTARG
+ fi
+ ;;
l)
logfile="$OPTARG"
;;
@@ -1062,19 +1151,15 @@ fi
# open log file for append (default to stdout)
if [[ -n $logfile ]]; then
+ # force single threaded to preserve the output
+ jobs=1
+
logfd=3
exec 3>>"$logfile"
else
logfd=1
fi
-# open temporary file
-if [[ -n $tmpdir ]]; then
- tmpfile=$(mktemp -t regression_XXXXXX --tmpdir=$tmpdir)
-else
- tmpfile=$(mktemp -t regression_XXXXXX)
-fi
-
# determine the current system's architecture
arch=$($GLBL_SYS_ARCH)
@@ -1093,7 +1178,6 @@ echo " tests errored: $stats_error" >&$logfd
echo "============================================================" >&$logfd
# cleanup and exit
-rm -f $tmpfile
rc=0
[[ $stats_failure -gt 0 ]] && rc=$(($rc + 2))
[[ $stats_error -gt 0 ]] && rc=$(($rc + 4))
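Stripped of the test-specific details, the job-queue pattern introduced above can be sketched in a few lines of standalone bash (a minimal illustration, not part of the commit; 'worker' stands in for run_test_batch):

    #!/bin/bash
    # Run up to $max_jobs workers in parallel, but print their logs
    # strictly in submission order, mirroring run_tests()/tail_log().

    max_jobs=4
    declare -a pids logs
    tail_cnt=0

    worker() {
        # stand-in for run_test_batch: produce some slow output
        for i in 1 2 3; do
            echo "batch $1 step $i"
            sleep 1
        done
    }

    for batch in a b c d e f; do
        worker "$batch" >& "$batch.log" &
        pids+=($!)
        logs+=("$batch.log")

        # if the queue is full, stream the oldest job's log;
        # tail exits on its own once the watched PID terminates
        if [[ $(jobs -r | wc -l) -ge $max_jobs ]]; then
            tail -n +0 --pid=${pids[$tail_cnt]} -f ${logs[$tail_cnt]}
            tail_cnt=$(( tail_cnt + 1 ))
        fi
    done

    # drain the logs of any jobs still in the queue
    for i in $(seq $tail_cnt $(( ${#pids[@]} - 1 ))); do
        tail -n +0 --pid=${pids[$i]} -f ${logs[$i]}
    done

The key trick is GNU tail's --pid option: it follows the log while the watched job runs and exits automatically once that PID disappears, so the console output stays serialized even though the jobs themselves are not.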