summary refs log tree commit diff
path: root/tests/find
diff options
context:
space:
mode:
author    Bernhard Voelker <mail@bernhard-voelker.de> 2019-01-09 00:24:34 +0100
committer Bernhard Voelker <mail@bernhard-voelker.de> 2019-01-09 00:24:34 +0100
commit    8a2f24d5ff73ebe31df9a2a3bdfa4a53825204bb (patch)
tree      a0b77b3d938f6c0b959676ebb90aac3e983aea1d /tests/find
parent    f72b299cdda03fafb6dfad1fcf289ff50cb5ddf5 (diff)
download  findutils-8a2f24d5ff73ebe31df9a2a3bdfa4a53825204bb.tar.gz
tests: migrate 'many-dir-entries-vs-OOM' to the new testsuite
For migrating, merge with the basic structure from 'tests/sample-test', i.e., source in 'tests/init.sh', call 'print_ver_', "Exit $fail", etc. Also remove now-common functions like die() and framework_failure_(). * find/testsuite/sv-34079.sh: Move to ... * tests/find/many-dir-entries-vs-OOM.sh: ... this, and apply the above. While at it, loosely pull in changes from the related test in the GNU coreutils [1]: Create only 200,000 files, rather than 4 million. The latter was overkill, and was too likely to fail due to inode exhaustion. Now that this test doesn't take so long, label it as merely "expensive", rather than "very expensive". Furthermore, simplify the test data creation, and use the ulimit specifically for each program under test (find, oldfind). * find/testsuite/Makefile.am (tests_shell_progs): Remove the reference to this test ... * tests/local.mk (all_tests): .. and add it here. [1] https://git.sv.gnu.org/cgit/coreutils.git/tree/tests/rm/many-dir-entries-vs-OOM.sh?id=4711c49312d5
Diffstat (limited to 'tests/find')
-rwxr-xr-x  tests/find/many-dir-entries-vs-OOM.sh  94
1 files changed, 94 insertions, 0 deletions
diff --git a/tests/find/many-dir-entries-vs-OOM.sh b/tests/find/many-dir-entries-vs-OOM.sh
new file mode 100755
index 00000000..4f0f2f6f
--- /dev/null
+++ b/tests/find/many-dir-entries-vs-OOM.sh
@@ -0,0 +1,94 @@
+#!/bin/sh
+# This test verifies that find does not have excessive memory consumption
+# even for large directories.
+# See Savannah bug #34079.
+
+# Copyright (C) 2011-2019 Free Software Foundation, Inc.
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+. "${srcdir=.}/tests/init.sh"
+print_ver_ find oldfind
+
+# Mark as expensive: only run when the test framework enables
+# expensive tests (presumably via RUN_EXPENSIVE_TESTS=yes from
+# tests/init.sh -- TODO confirm).
+expensive_
+
+# Number of entries to create in the large test directory.
+NFILES=200000
+
+# Require seq(1) for this test - which may not be available
+# on some systems, e.g. on some *BSDs.
+seq -f "_%04g" 0 2 >/dev/null 2>&1 \
+ || skip_ "required utility 'seq' missing"
+
+# Get the number of free inodes on the file system of the given file/directory.
+get_ifree_() {
+ d="$1"
+ # Try GNU coreutils' stat.
+ stat --format='%d' -f -- "$d" 2>/dev/null \
+ && return 0
+
+ # Fall back to parsing 'df -i' output.
+ df -i -- "$d" \
+ | awk '
+ NR == 1 { # Find ifree column.
+ ifree = -1;
+ for (i=1; i<=NF; i++) {
+ n=tolower($i);
+ if(n=="ifree" || n=="iavail") {
+ ifree=i;
+ }
+ };
+ if (ifree<=0) {
+ print "failed to determine IFREE column in header: ", $0 | "cat 1>&2";
+ exit 1;
+ }
+ next;
+ }
+ { print $ifree }
+ ' \
+ | grep .
+}
+
+# Skip early if we know that there are too few free inodes.
+# Require some slack.
+free_inodes=$(get_ifree_ '.') \
+ && test 0 -lt $free_inodes \
+ && min_free_inodes=$(expr 12 \* ${NFILES} / 10) \
+ && { test $min_free_inodes -lt $free_inodes \
+ || skip_ "too few free inodes on '.': $free_inodes;" \
+ "this test requires at least $min_free_inodes"; }
+
+# Create directory with many entries.
+mkdir dir && cd dir || framework_failure_
+seq ${NFILES} | xargs touch || framework_failure_
+cd ..
+
+# Create a small directory as reference to determine lower ulimit.
+mkdir dir2 && touch dir2/a dir2/b dir2/c || framework_failure_
+
+# Check the memory consumption of both find and oldfind.
+# (oldfind uses savedir, meaning that it stores all the directory
+# entries; hence the excessive memory consumption bug applies to
+# oldfind even though it is not using fts.)
+# NOTE(review): the original comment began "We don't check oldfind",
+# which contradicted the loop below that does check oldfind.
+for exe in find oldfind; do
+ # Determine memory consumption for the trivial case: the minimal
+ # 'ulimit -v' value with which $exe still succeeds on the
+ # 3-entry reference directory dir2.
+ vm="$(get_min_ulimit_v_ ${exe} dir2 -fprint dummy)" \
+ || skip_ "this shell lacks ulimit support"
+
+ # Allow 35MiB more memory than above ('ulimit -v' counts KiB);
+ # traversing the NFILES-entry directory must fit within that.
+ ( ulimit -v $(($vm + 35000)) && ${exe} dir >/dev/null ) \
+ || { echo "${exe}: memory consumption is too high" >&2; fail=1; }
+done
+
+Exit $fail