summaryrefslogtreecommitdiff
path: root/doc/unit-tests.txt
blob: 55bbcebe77746acaeb3554c4bdd11a415ee02ec5 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
Building unit tests
===================

  make unit-test/unit-test


Running unit tests
==================

The tests leave no artifacts at the moment, so you can just run
unit-test/unit-test from wherever you want.

  ./unit-test <list|run> [pattern]

Listing tests
-------------

Every test has a symbolic path associated with it.  Just like file paths they
are split into components separated by '/'s.  The 'list' command will show you
a tree of these tests, along with some description text.


ejt@devel-vm1:~/lvm2/unit-test/$ ./unit-test list
base
  data-struct
    bitset
      and  .................................................  and all bits
      equal  ...............................................  equality
      get_next  ............................................  get next set bit
    list
      splice  ..............................................  joining lists together
    string
      asprint  .............................................  tests asprint
      strncpy  .............................................  tests string copying
  device
    bcache
      block-size-multiple-page  ............................  block size must be a multiple of page size
      block-size-positive  .................................  block size must be positive
      blocks-get-evicted  ..................................  block get evicted with many reads
      cache-blocks-positive  ...............................  nr cache blocks must be positive
      create-destroy  ......................................  simple create/destroy
      flush-waits  .........................................  flush waits for all dirty
      get-reads  ...........................................  bcache_get() triggers read
      prefetch-never-waits  ................................  too many prefetches does not trigger a wait
      prefetch-reads  ......................................  prefetch issues a read
      read-multiple-files  .................................  read from multiple files
      reads-cached  ........................................  repeated reads are cached
      writeback-occurs  ....................................  dirty data gets written back
      zero-flag-dirties  ...................................  zeroed data counts as dirty
  formatting
    percent
      0  ...................................................  Pretty printing of percentages near 0%
      100  .................................................  Pretty printing of percentages near 100%
  regex
    fingerprints  ..........................................  not sure
    matching  ..............................................  test the matcher with a variety of regexes
dm
  target
    mirror
      status  ..............................................  parsing mirror status
metadata
  config
    cascade  ...............................................  cascade
    clone  .................................................  duplicating a config tree
    parse  .................................................  parsing various


An optional 'pattern' argument may be specified to select subsets of tests.
This pattern is a POSIX regex and does a substring match, so you will need to
use anchors if you particularly want the match at the beginning or end of the
string.

ejt@devel-vm1:~/lvm2/unit-test/$ ./unit-test list data-struct
base
  data-struct
    bitset
      and  .................................................  and all bits
      equal  ...............................................  equality
      get_next  ............................................  get next set bit
    list
      splice  ..............................................  joining lists together
    string
      asprint  .............................................  tests asprint
      strncpy  .............................................  tests string copying

ejt@devel-vm1:~/lvm2/unit-test/$ ./unit-test list s$
base
  device
    bcache
      flush-waits  .........................................  flush waits for all dirty
      get-reads  ...........................................  bcache_get() triggers read
      prefetch-never-waits  ................................  too many prefetches does not trigger a wait
      prefetch-reads  ......................................  prefetch issues a read
      read-multiple-files  .................................  read from multiple files
      writeback-occurs  ....................................  dirty data gets written back
      zero-flag-dirties  ...................................  zeroed data counts as dirty
  regex
    fingerprints  ..........................................  not sure
dm
  target
    mirror
      status  ..............................................  parsing mirror status


Running tests
=============

'make run-unit-test' from the top level will run all unit tests.  But I tend to
run it by hand so I can select just the tests I'm working on.

Use the 'run' command to run the tests.  Currently all logging goes to stderr,
so the test runner prints a line at the start of the test and a line
indicating success or failure at the end.

ejt@devel-vm1:~/lvm2/unit-test/$ ./unit-test run bcache/block-size
[RUN    ] /base/device/bcache/block-size-multiple-page
bcache block size must be a multiple of page size
bcache block size must be a multiple of page size
bcache block size must be a multiple of page size
bcache block size must be a multiple of page size
[     OK] /base/device/bcache/block-size-multiple-page

[RUN    ] /base/device/bcache/block-size-positive
bcache must have a non zero block size
[     OK] /base/device/bcache/block-size-positive


2/2 tests passed


ejt@devel-vm1:~/lvm2/unit-test/$ ./unit-test run data-struct
[RUN    ] /base/data-struct/bitset/and
[     OK] /base/data-struct/bitset/and

[RUN    ] /base/data-struct/bitset/equal
[     OK] /base/data-struct/bitset/equal

[RUN    ] /base/data-struct/bitset/get_next
[     OK] /base/data-struct/bitset/get_next

[RUN    ] /base/data-struct/list/splice
[     OK] /base/data-struct/list/splice

[RUN    ] /base/data-struct/string/asprint
[     OK] /base/data-struct/string/asprint

[RUN    ] /base/data-struct/string/strncpy
[     OK] /base/data-struct/string/strncpy


6/6 tests passed


Writing tests
=============

[See unit-test/framework.h and unit-test/units.h for the details]

Tests are grouped together into 'suites', all tests in a suite share a
'fixture'.  A fixture is a void * to any object you want; use it to set up any
common environment that you need for the tests to run (eg, creating a dm_pool).

Test suites have nothing to do with the test paths, you can have tests from
different suites with similar paths, the runner sorts things for you.

Put your tests in a file in unit-test/, with '_t' at the end of the name
(convention only, nothing relies on this).

#include "units.h"

Then write any fixtures you need:

eg,
static void *_mem_init(void) {
	struct dm_pool *mem = dm_pool_create("bitset test", 1024);
	if (!mem) {
		fprintf(stderr, "out of memory\n");
		exit(1);
	}

	return mem;
}

static void _mem_exit(void *mem)
{
	dm_pool_destroy(mem);
}

Then write your tests, which should take the void * that was returned by your
fixture.  Use the T_ASSERT* macros to indicate failure.

eg,
static void test_equal(void *fixture)
{
	struct dm_pool *mem = fixture;
        dm_bitset_t bs1 = dm_bitset_create(mem, NR_BITS);
        dm_bitset_t bs2 = dm_bitset_create(mem, NR_BITS);

        int i, j;
        for (i = 0, j = 1; i < NR_BITS; i += j, j++) {
                dm_bit_set(bs1, i);
                dm_bit_set(bs2, i);
        }

        T_ASSERT(dm_bitset_equal(bs1, bs2));
        T_ASSERT(dm_bitset_equal(bs2, bs1));

        for (i = 0; i < NR_BITS; i++) {
                bit_flip(bs1, i);
                T_ASSERT(!dm_bitset_equal(bs1, bs2));
                T_ASSERT(!dm_bitset_equal(bs2, bs1));

                T_ASSERT(dm_bitset_equal(bs1, bs1)); /* comparing with self */
                bit_flip(bs1, i);
        }
}

At the end of your test file you should write a function that builds one or
more test suites and adds them to the list of all suites that is passed in.  I
tend to write a little macro (T) to save typing the same test path repeatedly.

eg,
#define T(path, desc, fn) register_test(ts, "/base/data-struct/bitset/" path, desc, fn)

void bitset_tests(struct dm_list *all_tests)
{
	struct test_suite *ts = test_suite_create(_mem_init, _mem_exit);
	if (!ts) {
		fprintf(stderr, "out of memory\n");
		exit(1);
	}

	T("get_next", "get next set bit", test_get_next);
	T("equal", "equality", test_equal);
	T("and", "and all bits", test_and);

	dm_list_add(all_tests, &ts->list);
}

Then you need to declare your registration function and call it in units.h.


// Declare the function that adds tests suites here ...
  ...
void bitset_tests(struct dm_list *suites);
  ...

// ... and call it in here.
static inline void register_all_tests(struct dm_list *suites)
{
	...
	bitset_tests(suites);
	...
}

Finally add your test file to the Makefile.in and rerun configure.