1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
|
/* -----------------------------------------------------------------------------
*
* (c) The GHC Team, 2001
* Author: Sungwoo Park
*
* Lag/Drag/Void profiling.
*
* ---------------------------------------------------------------------------*/
#ifdef PROFILING
#include "Rts.h"
#include "LdvProfile.h"
#include "RtsFlags.h"
#include "Profiling.h"
#include "Stats.h"
#include "RtsUtils.h"
#include "Schedule.h"
/* --------------------------------------------------------------------------
* Fills in the slop when a *dynamic* closure changes its type.
* First calls LDV_recordDead() to declare the closure is dead, and then
* fills in the slop.
*
* Invoked when:
* 1) blackholing, UPD_BH_UPDATABLE() and UPD_BH_SINGLE_ENTRY (in
* includes/StgMacros.h), threadLazyBlackHole() and
* threadSqueezeStack() (in GC.c).
* 2) updating with indirection closures, updateWithIndirection()
* and updateWithPermIndirection() (in Storage.h).
*
* LDV_recordDead_FILL_SLOP_DYNAMIC() is not called on 'inherently used'
* closures such as TSO. It is not called on PAP because PAP is not updatable.
* ----------------------------------------------------------------------- */
void
LDV_recordDead_FILL_SLOP_DYNAMIC( StgClosure *p )
{
    nat total, slopWords, w;
#if defined(__GNUC__) && __GNUC__ < 3 && defined(DEBUG)
#error Please use gcc 3.0+ to compile this file with DEBUG; gcc < 3.0 miscompiles it
#endif
    // era == 0 means LDV profiling is not active, so there is nothing
    // to record and no need to fill slop.
    if (era == 0) {
        return;
    }
    // Same effect as FILL_SLOP(), but the closure is first declared
    // dead via LDV_recordDead().
    total = closure_sizeW(p);
    LDV_recordDead((StgClosure *)(p), total);
    if (total > sizeofW(StgThunkHeader)) {
        // Zero every payload word beyond the thunk header.
        slopWords = total - sizeofW(StgThunkHeader);
        for (w = 0; w < slopWords; w++) {
            ((StgThunk *)(p))->payload[w] = 0;
        }
    }
}
/* --------------------------------------------------------------------------
* This function is called eventually on every object destroyed during
* a garbage collection, whether it is a major garbage collection or
* not. If c is an 'inherently used' closure, nothing happens. If c
* is an ordinary closure, LDV_recordDead() is called on c with its
* proper size which excludes the profiling header portion in the
* closure. Returns the size of the closure, including the profiling
* header portion, so that the caller can find the next closure.
* ----------------------------------------------------------------------- */
/*
 * Examine a single closure during the post-GC dead census.
 *
 * Returns the closure's size in words (including any profiling
 * header), so the caller can step to the next closure.  For
 * 'inherently used' closures nothing is recorded; for ordinary
 * closures LDV_recordDead() is called; forwarded closures report the
 * size stashed in their LDV field.
 */
STATIC_INLINE nat
processHeapClosureForDead( StgClosure *c )
{
    nat size;
    const StgInfoTable *info;

    // Read the raw header word only.  It may be a forwarding pointer,
    // which is NOT a valid info-table pointer, so it must not be
    // interpreted (e.g. via get_itbl()) before the
    // IS_FORWARDING_PTR() check below.
    info = c->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // The size of the evacuated closure is currently stored in
        // the LDV field.  See SET_EVACUAEE_FOR_LDV() in
        // includes/StgLdvProf.h.
        return LDVW(c);
    }
    info = INFO_PTR_TO_STRUCT(info);

    // Sanity: the creation era must be a valid, already-elapsed era.
    ASSERT(((LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT) <= era &&
           ((LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT) > 0);
    // Sanity: either never used, or last used in a valid era.
    ASSERT(((LDVW(c) & LDV_STATE_MASK) == LDV_STATE_CREATE) ||
           (
               (LDVW(c) & LDV_LAST_MASK) <= era &&
               (LDVW(c) & LDV_LAST_MASK) > 0
           ));

    size = closure_sizeW(c);

    switch (info->type) {
        /*
          'inherently used' cases: do nothing.
        */
    case TSO:
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    case ARR_WORDS:
    case WEAK:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case BCO:
    case STABLE_NAME:
    case TVAR_WATCH_QUEUE:
    case TVAR:
    case TREC_HEADER:
    case TREC_CHUNK:
    case INVARIANT_CHECK_QUEUE:
    case ATOMIC_INVARIANT:
        return size;

        /*
          ordinary cases: call LDV_recordDead().
        */
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_SELECTOR:
    case THUNK_2_0:
    case THUNK_1_1:
    case THUNK_0_2:
    case AP:
    case PAP:
    case AP_STACK:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_2_0:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_2_0:
    case FUN_1_1:
    case FUN_0_2:
    case BLACKHOLE:
    case CAF_BLACKHOLE:
    case IND_PERM:
    case IND_OLDGEN_PERM:
        /*
          'Ignore' cases
        */
        // Why can we ignore IND/IND_OLDGEN closures?  We assume that
        // any census is preceded by a major garbage collection, which
        // IND/IND_OLDGEN closures cannot survive.  Therefore, it is no
        // use considering IND/IND_OLDGEN closures in the meanwhile
        // because they will perish before the next census at any
        // rate.
    case IND:
    case IND_OLDGEN:
        // Found a dead closure: record its size
        LDV_recordDead(c, size);
        return size;

        /*
          Error case
        */
        // static objects
    case IND_STATIC:
    case CONSTR_STATIC:
    case FUN_STATIC:
    case THUNK_STATIC:
    case CONSTR_NOCAF_STATIC:
        // stack objects
    case UPDATE_FRAME:
    case CATCH_FRAME:
    case STOP_FRAME:
    case RET_DYN:
    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
        // others
    case BLOCKED_FETCH:
    case FETCH_ME:
    case FETCH_ME_BQ:
    case RBH:
    case REMOTE_REF:
    case INVALID_OBJECT:
    default:
        barf("Invalid object in processHeapClosureForDead(): %d", info->type);
        return 0;
    }
}
/* --------------------------------------------------------------------------
 * Calls processHeapClosureForDead() on every *dead* closure in the
 * heap blocks starting at bd.
 * ----------------------------------------------------------------------- */
static void
processHeapForDead( bdescr *bd )
{
    StgPtr scan;

    // Walk each block in the chain, visiting every closure in turn
    // and stepping over any zero-filled slop between closures.
    for (; bd != NULL; bd = bd->link) {
        scan = bd->start;
        while (scan < bd->free) {
            scan += processHeapClosureForDead((StgClosure *)scan);
            // skip slop (zeroed words left by overwritten closures)
            while (scan < bd->free && !*scan) {
                scan++;
            }
        }
        ASSERT(scan == bd->free);
    }
}
/* --------------------------------------------------------------------------
* Calls processHeapClosureForDead() on every *dead* closures in the nursery.
* ----------------------------------------------------------------------- */
static void
processNurseryForDead( void )
{
    StgPtr scan, blockLimit;
    bdescr *bd;

    // Walk the nursery block chain, stopping at the first unused
    // (empty) block or at the end of the chain.
    for (bd = MainCapability.r.rNursery->blocks;
         bd != NULL && bd->start < bd->free;
         bd = bd->link) {
        scan = bd->start;
        blockLimit = bd->start + BLOCK_SIZE_W;
        // Visit each closure up to the allocation pointer, but never
        // beyond the physical end of the block.
        while (scan < bd->free && scan < blockLimit) {
            scan += processHeapClosureForDead((StgClosure *)scan);
            // skip slop
            while (scan < bd->free && scan < blockLimit && !*scan) {
                scan++;
            }
        }
    }
}
/* --------------------------------------------------------------------------
* Calls processHeapClosureForDead() on every *dead* closures in the
* small object pool.
* ----------------------------------------------------------------------- */
static void
processSmallObjectPoolForDead( void )
{
    bdescr *blk;
    StgPtr scan;

    // Visit every closure in every block of the small object pool.
    blk = g0s0->blocks;
    while (blk != NULL) {
        scan = blk->start;
        while (scan < blk->free) {
            scan += processHeapClosureForDead((StgClosure *)scan);
            // skip slop
            while (scan < blk->free && !*scan) {
                scan++;
            }
        }
        ASSERT(scan == blk->free);
        blk = blk->link;
    }
}
/* --------------------------------------------------------------------------
* Calls processHeapClosureForDead() on every *dead* closures in the closure
* chain.
* ----------------------------------------------------------------------- */
static void
processChainForDead( bdescr *bd )
{
    // Any object still on the chain is dead; each block holds exactly
    // one (large) object starting at bd->start.
    for (; bd != NULL; bd = bd->link) {
        processHeapClosureForDead((StgClosure *)bd->start);
    }
}
/* --------------------------------------------------------------------------
* Start a census for *dead* closures, and calls
* processHeapClosureForDead() on every closure which died in the
* current garbage collection. This function is called from a garbage
* collector right before tidying up, when all dead closures are still
* stored in the heap and easy to identify. Generations 0 through N
 * have just been garbage collected.
* ----------------------------------------------------------------------- */
void
LdvCensusForDead( nat N )
{
    nat g, s;

    // era == 0 means that LDV profiling is currently turned off;
    // there is nothing to census.
    if (era == 0) {
        return;
    }

    if (RtsFlags.GcFlags.generations == 1) {
        //
        // Todo: support LDV for two-space garbage collection.
        //
        barf("Lag/Drag/Void profiling not supported with -G1");
    }

    // Visit every step of every generation that was just collected.
    for (g = 0; g <= N; g++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            if (g == 0 && s == 0) {
                // The youngest step lives in the allocation areas
                // rather than in old_blocks.
                processSmallObjectPoolForDead();
                processNurseryForDead();
            } else {
                processHeapForDead(generations[g].steps[s].old_blocks);
            }
            // Large objects are chained separately in every step.
            processChainForDead(generations[g].steps[s].large_objects);
        }
    }
}
/* --------------------------------------------------------------------------
* Regard any closure in the current heap as dead or moribund and update
* LDV statistics accordingly.
* Called from shutdownHaskell() in RtsStartup.c.
* Also, stops LDV profiling by resetting ldvTime to 0.
* ----------------------------------------------------------------------- */
// Treat every closure still live at shutdown as dead/moribund by
// running a dead census over all generations.  Called from
// shutdownHaskell() (RtsStartup.c).
// NOTE(review): the header comment above says this also stops LDV
// profiling by resetting ldvTime to 0, but no such reset is visible
// here — presumably it happens elsewhere; confirm against callers.
void
LdvCensusKillAll( void )
{
    LdvCensusForDead(RtsFlags.GcFlags.generations - 1);
}
#endif /* PROFILING */
|