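// Standalone C test for the RTS block allocator: allocate and free block
// groups in random, sequential, and aligned patterns, verifying free-list
// sanity after every operation (DEBUG builds only).
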
#include "Rts.h"
#include <stdio.h>

extern bdescr *allocGroup_lock(uint32_t n);
extern bdescr *allocAlignedGroupOnNode (uint32_t node, W_ n);
extern void freeGroup_lock(bdescr *p);

const int ARRSIZE  = 256;
const int LOOPS    = 100;
const int MAXALLOC = ((8 * 1024 * 1024) / BLOCK_SIZE - 1);
//const int MAXALLOC = ((64 * 1024 * 1024) / BLOCK_SIZE - 1);
const int SEED     = 0xf00f00;

extern StgWord mblocks_allocated;
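
// Each test sweeps an array of ARRSIZE block groups; in DEBUG builds,
// checkFreeListSanity() is run after every allocation and free.
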
static void test_random_alloc(void)
{
    bdescr *a[ARRSIZE];

    // repeatedly sweep through the array, allocating new random-sized
    // block groups and deallocating the old ones.
    for (int i=0; i < LOOPS; i++)
    {
        for (int j=0; j < ARRSIZE; j++)
        {
            if (i > 0)
            {
                IF_DEBUG(block_alloc, debugBelch("A%d: freeing %p, %d blocks @ %p\n", j, a[j], a[j]->blocks, a[j]->start));
                freeGroup_lock(a[j]);
                DEBUG_ONLY(checkFreeListSanity());
            }

            // allocating zero blocks isn't allowed, hence the "+ 1"
            int b = (rand() % MAXALLOC) + 1;
            a[j] = allocGroup_lock(b);
            IF_DEBUG(block_alloc, debugBelch("A%d: allocated %p, %d blocks @ %p\n", j, a[j], b, a[j]->start));
            DEBUG_ONLY(checkFreeListSanity());
        }
    }

    // free the groups still held after the final sweep
    for (int j=0; j < ARRSIZE; j++)
    {
        freeGroup_lock(a[j]);
    }
}
static void test_sequential_alloc(void)
{
    bdescr *a[ARRSIZE];

    // this time, sweep forwards allocating new blocks, and then
    // backwards deallocating them.
    for (int i=0; i < LOOPS; i++)
    {
        for (int j=0; j < ARRSIZE; j++)
        {
            int b = (rand() % MAXALLOC) + 1;
            a[j] = allocGroup_lock(b);
            IF_DEBUG(block_alloc, debugBelch("B%d,%d: allocated %p, %d blocks @ %p\n", i, j, a[j], b, a[j]->start));
            DEBUG_ONLY(checkFreeListSanity());
        }
        for (int j=ARRSIZE-1; j >= 0; j--)
        {
            IF_DEBUG(block_alloc, debugBelch("B%d,%d: freeing %p, %d blocks @ %p\n", i, j, a[j], a[j]->blocks, a[j]->start));
            freeGroup_lock(a[j]);
            DEBUG_ONLY(checkFreeListSanity());
        }
    }
}
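
// The aligned variant additionally checks the alignment of each group:
// the start address returned by allocAlignedGroupOnNode must be a multiple
// of the requested size in bytes (blocks * BLOCK_SIZE).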
static void test_aligned_alloc(void)
{
    bdescr *a[ARRSIZE];

    // as in the sequential test, sweep forwards allocating new groups,
    // and then backwards deallocating them.
    for (int i=0; i < LOOPS; i++)
    {
        for (int j=0; j < ARRSIZE; j++)
        {
            // allocAlignedGroupOnNode does not support allocating more than
            // BLOCKS_PER_MBLOCK/2 blocks.
            int b = rand() % (BLOCKS_PER_MBLOCK / 2);
            if (b == 0) { b = 1; }
            a[j] = allocAlignedGroupOnNode(0, b);
            if ((((W_)(a[j]->start)) % (b*BLOCK_SIZE)) != 0)
            {
                barf("%p is not aligned to allocation size %d", a[j], b);
            }
            IF_DEBUG(block_alloc, debugBelch("B%d,%d: allocated %p, %d blocks @ %p\n", i, j, a[j], b, a[j]->start));
            DEBUG_ONLY(checkFreeListSanity());
        }
        for (int j=ARRSIZE-1; j >= 0; j--)
        {
            IF_DEBUG(block_alloc, debugBelch("B%d,%d: freeing %p, %d blocks @ %p\n", i, j, a[j], a[j]->blocks, a[j]->start));
            freeGroup_lock(a[j]);
            DEBUG_ONLY(checkFreeListSanity());
        }
    }
}
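
// main initialises the RTS (which sets up the block allocator), runs the
// three tests, and relies on the memory-leak test performed by hs_exit()
// to catch any block groups that were never freed.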
int main (int argc, char *argv[])
{
    srand(SEED);

    {
        RtsConfig conf = defaultRtsConfig;
        conf.rts_opts_enabled = RtsOptsAll;
        hs_init_ghc(&argc, &argv, conf);
    }

    test_random_alloc();
    test_sequential_alloc();
    test_aligned_alloc();

    DEBUG_ONLY(checkFreeListSanity());

    hs_exit(); // will do a memory leak test
    exit(0);
}