#!/bin/sh
# This test verifies that find does not have excessive memory consumption
# even for large directories.
# See Savannah bug #34079.
# Copyright (C) 2011-2019 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Load the test harness; it provides the helpers used below
# (print_ver_, expensive_, skip_, framework_failure_, Exit).
. "${srcdir=.}/tests/init.sh"
print_ver_ find oldfind
# Mark as expensive.
expensive_
# Number of entries to create in the large test directory.
NFILES=200000
# Require seq(1) for this test - which may not be available
# on some systems, e.g on some *BSDs.
seq -f "_%04g" 0 2 >/dev/null 2>&1 \
|| skip_ "required utility 'seq' missing"
# Get the number of free inodes on the file system of the given file/directory.
# Prints the count on stdout; returns non-zero if it cannot be determined.
get_ifree_() {
  d="$1"
  # Fast path: GNU coreutils' stat(1) in file-system mode; '%d' with -f
  # prints the number of free file nodes.
  if stat --format='%d' -f -- "$d" 2>/dev/null; then
    return 0
  fi
  # Fall back to parsing 'df -i' output; the header names the inode
  # column "IFree" or "iavail" depending on the platform.
  df -i -- "$d" \
    | awk '
        NR == 1 {
          # Locate the IFREE/IAVAIL column in the header line.
          col = -1
          for (f = 1; f <= NF; f++) {
            h = tolower($f)
            if (h == "ifree" || h == "iavail")
              col = f
          }
          if (col <= 0) {
            print "failed to determine IFREE column in header: ", $0 | "cat 1>&2"
            exit 1
          }
          next
        }
        { print $col }
      ' \
    | grep .
}
# Skip early if we know that there are too few free inodes.
# Require 20% slack over NFILES; if the free-inode count cannot be
# determined (or is reported as zero), carry on and let the test try.
if free_inodes=$(get_ifree_ '.') && test 0 -lt $free_inodes; then
  min_free_inodes=$(expr 12 \* ${NFILES} / 10) \
    && { test $min_free_inodes -lt $free_inodes \
           || skip_ "too few free inodes on '.': $free_inodes;" \
                "this test requires at least $min_free_inodes"; }
fi
# Create a directory populated with NFILES entries (named 1..NFILES).
mkdir dir || framework_failure_
cd dir || framework_failure_
seq ${NFILES} | xargs touch || framework_failure_
cd ..
# Create a small reference directory used to determine the lower ulimit bound.
mkdir dir2 || framework_failure_
touch dir2/a dir2/b dir2/c || framework_failure_
# Exercise both executables.  oldfind uses savedir, meaning that it
# stores all the directory entries before processing them; with the
# short file names used here that presumably still fits within the
# allowed headroom.
# NOTE(review): the previous comment said "We don't check oldfind" yet
# the loop below does check it - confirm intent against upstream history.
fail=0
for exe in find oldfind; do
  # Determine memory consumption for the trivial case (3-entry dir2).
  vm="$(get_min_ulimit_v_ ${exe} dir2 -fprint dummy)" \
    || skip_ "this shell lacks ulimit support"
  # Allow 35MiB more memory than above; traversing the 200k-entry
  # directory must stay within that limit.
  ( ulimit -v $(($vm + 35000)) && ${exe} dir >/dev/null ) \
    || { echo "${exe}: memory consumption is too high" >&2; fail=1; }
done
Exit $fail