/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2018
 *
 * Non-moving garbage collector and allocator
 *
 * ---------------------------------------------------------------------------*/

#pragma once

#if !defined(CMINUSMINUS)

#include <string.h>
#include "HeapAlloc.h"
#include "NonMovingMark.h"

#include "BeginPrivate.h"

// Segments
#define NONMOVING_SEGMENT_BITS 15UL   // 2^15 = 32 KiB
// Mask of a pointer's offset within a segment; the complement yields the
// segment base (see nonmovingGetSegment_unchecked)
#define NONMOVING_SEGMENT_MASK ((1UL << NONMOVING_SEGMENT_BITS) - 1)
// In bytes
#define NONMOVING_SEGMENT_SIZE (1UL << NONMOVING_SEGMENT_BITS)
// In words
#define NONMOVING_SEGMENT_SIZE_W ((1UL << NONMOVING_SEGMENT_BITS) / SIZEOF_VOID_P)
// In blocks
#define NONMOVING_SEGMENT_BLOCKS (NONMOVING_SEGMENT_SIZE / BLOCK_SIZE)

GHC_STATIC_ASSERT(NONMOVING_SEGMENT_SIZE % BLOCK_SIZE == 0, "non-moving segment size must be multiple of block size");

GHC_STATIC_ASSERT(NONMOVING_SEGMENT_BLOCKS * 2 <= BLOCKS_PER_MBLOCK, "non-moving segment size must not exceed half of mblock size");
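
// Worked example (illustrative): with NONMOVING_SEGMENT_BITS == 15 the
// segment size is 32768 bytes; on a 64-bit platform (SIZEOF_VOID_P == 8)
// NONMOVING_SEGMENT_SIZE_W == 4096 words, and with GHC's default 4096-byte
// blocks NONMOVING_SEGMENT_BLOCKS == 8.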

// The index of a block within a segment
typedef uint16_t nonmoving_block_idx;

#if defined(DEBUG)
#define TRACK_SEGMENT_STATE
#endif

#if defined(TRACK_SEGMENT_STATE)
// The collector itself doesn't require each segment to know its state (it is
// implied by which segment list the segment is on); however, this can be very
// useful while debugging.
enum NonmovingSegmentState {
    FREE, CURRENT, ACTIVE, FILLED, FILLED_SWEEPING
};

#define SET_SEGMENT_STATE(seg, st) RELAXED_STORE(&(seg)->state, (st))
#define ASSERT_SEGMENT_STATE(seg, st) ASSERT(RELAXED_LOAD(&(seg)->state) == (st))
#else
#define SET_SEGMENT_STATE(_seg,_st)
#define ASSERT_SEGMENT_STATE(_seg, _st)
#endif

// A non-moving heap segment
struct NonmovingSegment {
    struct NonmovingSegment *link;      // for linking together segments into lists
    struct NonmovingSegment *todo_link; // NULL when not in todo list
    nonmoving_block_idx next_free;      // index of the next unallocated block
#if defined(TRACK_SEGMENT_STATE)
    enum NonmovingSegmentState state;
#endif
    uint8_t bitmap[];                   // liveness bitmap
    // After the liveness bitmap come the data blocks. Note that we need to
    // ensure that the size of this struct (including the bitmap) is a multiple
    // of the word size, since GHC assumes that all object pointers are
    // so-aligned.

    // N.B. Some per-segment information is also stored in the
    // NonmovingSegmentInfo kept in the segment's block descriptor. Namely:
    //
    //  * the block size can be found in nonmovingSegmentInfo(seg)->log_block_size.
    //  * the next_free snapshot can be found in
    //    nonmovingSegmentInfo(seg)->next_free_snap.
    //
    // This allows us to mark a nonmoving closure without bringing the
    // NonmovingSegment header into cache.
};
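
// Illustrative layout of a segment (a sketch; exact offsets depend on the
// platform and on whether TRACK_SEGMENT_STATE is enabled):
//
//   +----------------------+ <- segment base, aligned to NONMOVING_SEGMENT_SIZE
//   | link                 |
//   | todo_link            |
//   | next_free            |
//   | [state]              |    (debug builds only)
//   | bitmap[n_blocks]     |    one byte per block
//   | (padding to word)    |
//   | block 0              |
//   | block 1              |
//   | ...                  |
//   +----------------------+ <- segment base + NONMOVING_SEGMENT_SIZE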

// Sentinel marking the end of a todo list. It is not NULL, because
// todo_link == NULL means the segment is not on any todo list.
#define END_NONMOVING_TODO_LIST ((struct NonmovingSegment*)1)

// A non-moving allocator for a particular block size
struct NonmovingAllocator {
    struct NonmovingSegment *filled;
    struct NonmovingSegment *saved_filled;
    struct NonmovingSegment *active;
    // indexed by capability number
    struct NonmovingSegment *current[];
};

// the first allocator serves blocks of size 2^NONMOVING_ALLOCA0 bytes
#define NONMOVING_ALLOCA0 3

// allocators cover block sizes from 2^NONMOVING_ALLOCA0 up to (but not
// including) 2^(NONMOVING_ALLOCA0 + NONMOVING_ALLOCA_CNT) bytes
#define NONMOVING_ALLOCA_CNT 12
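
// For illustration (not part of the original header): with NONMOVING_ALLOCA0
// == 3, allocator i serves blocks of 2^(3+i) bytes. For example, a request
// that rounds up to 64 bytes has log_block_size == 6 and is served by
//
//   nonmovingHeap.allocators[6 - NONMOVING_ALLOCA0]   // == allocators[3]
//
// mirroring the index computation in nonmovingPushActiveSegment below.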

// maximum number of free segments to hold on to
#define NONMOVING_MAX_FREE 16

struct NonmovingHeap {
    struct NonmovingAllocator *allocators[NONMOVING_ALLOCA_CNT];
    // free segment list. This is a cache where we keep up to
    // NONMOVING_MAX_FREE segments to avoid thrashing the block allocator.
    // Note that segments in this list are still counted towards
    // oldest_gen->n_blocks.
    struct NonmovingSegment *free;
    // the number of segments on the free list; accessed atomically.
    unsigned int n_free;

    // records the current length of the NonmovingAllocator.current arrays
    unsigned int n_caps;

    // The set of segments being swept in this GC. Segments are moved here from
    // the filled list during preparation and moved back to either the filled,
    // active, or free lists during sweep.  Should be NULL before mark and
    // after sweep.
    struct NonmovingSegment *sweep_list;
};

extern struct NonmovingHeap nonmovingHeap;

extern memcount nonmoving_live_words;

#if defined(THREADED_RTS)
extern bool concurrent_coll_running;
extern Mutex nonmoving_collection_mutex;
#endif

void nonmovingInit(void);
void nonmovingStop(void);
void nonmovingExit(void);


// The dead_weaks and resurrected_threads lists are used for two things:
//
// - The weaks and threads on these lists were found to be dead during
//   preparation, but the weaks will still be used for finalization and the
//   threads will be scheduled again (i.e. resurrected), so we need to keep
//   them alive in the non-moving heap as well. We therefore treat them as
//   roots and mark them.
//
// - In the non-threaded runtime we add weaks and threads found to be dead in
//   the non-moving heap to these lists so that they are finalized and
//   scheduled like any other weaks and threads. In the threaded runtime we
//   can't do this, as it would cause races between a minor collection and the
//   non-moving collection. Instead, we finalize the weaks and resurrect the
//   threads of the non-moving heap directly, but during a pause.
//
void nonmovingCollect(StgWeak **dead_weaks,
                       StgTSO **resurrected_threads);

void *nonmovingAllocate(Capability *cap, StgWord sz);
void nonmovingAddCapabilities(uint32_t new_n_caps);
void nonmovingPushFreeSegment(struct NonmovingSegment *seg);
void nonmovingClearBitmap(struct NonmovingSegment *seg);


INLINE_HEADER struct NonmovingSegmentInfo *nonmovingSegmentInfo(struct NonmovingSegment *seg) {
    return &Bdescr((StgPtr) seg)->nonmoving_segment;
}

INLINE_HEADER uint8_t nonmovingSegmentLogBlockSize(struct NonmovingSegment *seg) {
    return nonmovingSegmentInfo(seg)->log_block_size;
}

// Add a segment to the appropriate active list.
INLINE_HEADER void nonmovingPushActiveSegment(struct NonmovingSegment *seg)
{
    struct NonmovingAllocator *alloc =
        nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
    SET_SEGMENT_STATE(seg, ACTIVE);
    while (true) {
        struct NonmovingSegment *current_active = RELAXED_LOAD(&alloc->active);
        seg->link = current_active;
        if (cas((StgVolatilePtr) &alloc->active, (StgWord) current_active, (StgWord) seg) == (StgWord) current_active) {
            break;
        }
    }
}

// Add a segment to the appropriate filled list.
INLINE_HEADER void nonmovingPushFilledSegment(struct NonmovingSegment *seg)
{
    struct NonmovingAllocator *alloc =
        nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
    SET_SEGMENT_STATE(seg, FILLED);
    while (true) {
        struct NonmovingSegment *current_filled = (struct NonmovingSegment*) RELAXED_LOAD(&alloc->filled);
        RELAXED_STORE(&seg->link, current_filled);
        if (cas((StgVolatilePtr) &alloc->filled, (StgWord) current_filled, (StgWord) seg) == (StgWord) current_filled) {
            break;
        }
    }
}
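
// Both push functions above implement the classic lock-free "Treiber stack"
// push; in outline (an illustrative sketch, not additional API):
//
//   do {
//       old_head = RELAXED_LOAD(&list_head);
//       seg->link = old_head;
//   } while (cas(&list_head, old_head, seg) != old_head);
//
// The CAS retries whenever another capability pushed a segment concurrently.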

// Assert that the pointer can be traced by the non-moving collector (e.g.
// during the mark phase), i.e. that it is one of the following:
//
// - a static object
// - a large object
// - an object in the non-moving heap (i.e. in one of the segments)
//
void assert_in_nonmoving_heap(StgPtr p);

// The block size of a given segment in bytes.
INLINE_HEADER unsigned int nonmovingSegmentBlockSize(struct NonmovingSegment *seg)
{
    return 1 << nonmovingSegmentLogBlockSize(seg);
}

// How many blocks does a segment with the given block size have?
INLINE_HEADER unsigned int nonmovingBlockCount(uint8_t log_block_size)
{
  unsigned int segment_data_size = NONMOVING_SEGMENT_SIZE - sizeof(struct NonmovingSegment);
  segment_data_size -= segment_data_size % SIZEOF_VOID_P;
  unsigned int blk_size = 1 << log_block_size;
  // N.B. the +1 accounts for each block's byte in the mark bitmap.
  return segment_data_size / (blk_size + 1);
}
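
// Worked example (a sketch, assuming a 64-bit platform with
// TRACK_SEGMENT_STATE disabled, so sizeof(struct NonmovingSegment) == 24):
// for log_block_size == 3 (8-byte blocks),
//
//   segment_data_size = 32768 - 24 = 32744    (already a multiple of 8)
//   block count       = 32744 / (8 + 1) = 3638
//
// i.e. 3638 eight-byte blocks plus a 3638-byte bitmap fit in one segment.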

unsigned int nonmovingBlockCountFromSize(uint8_t log_block_size);

// How many blocks does the given segment contain? This is also the size of the
// bitmap in bytes.
INLINE_HEADER unsigned int nonmovingSegmentBlockCount(struct NonmovingSegment *seg)
{
  return nonmovingBlockCountFromSize(nonmovingSegmentLogBlockSize(seg));
}

// Get a pointer to the given block index assuming that the block size is as
// given (avoiding a potential cache miss when this information is already
// available). The log_block_size argument must equal
// nonmovingSegmentLogBlockSize(seg).
INLINE_HEADER void *nonmovingSegmentGetBlock_(struct NonmovingSegment *seg, uint8_t log_block_size, nonmoving_block_idx i)
{
  ASSERT(log_block_size == nonmovingSegmentLogBlockSize(seg));
  // Block size in bytes
  unsigned int blk_size = 1 << log_block_size;
  // Bitmap size in bytes
  W_ bitmap_size = nonmovingBlockCountFromSize(log_block_size) * sizeof(uint8_t);
  // Where the actual data starts (address of the first block).
  // Use ROUNDUP_BYTES_TO_WDS to align to the word size. Note that
  // ROUNDUP_BYTES_TO_WDS returns a count in _words_, not _bytes_, so we
  // convert back to bytes by multiplying by the word size.
  W_ data = ROUNDUP_BYTES_TO_WDS(((W_)seg) + sizeof(struct NonmovingSegment) + bitmap_size) * sizeof(W_);
  return (void*)(data + i*blk_size);
}
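
// Continuing the worked example above (same assumptions): for
// log_block_size == 3, bitmap_size == 3638, so the data begins at
//
//   ROUNDUP_BYTES_TO_WDS(seg + 24 + 3638) * sizeof(W_)  ==  seg + 3664
//
// and block i lives at seg + 3664 + i*8. Note that 3664 + 3638*8 == 32768,
// exactly filling the segment.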

// Get a pointer to the given block index.
INLINE_HEADER void *nonmovingSegmentGetBlock(struct NonmovingSegment *seg, nonmoving_block_idx i)
{
  return nonmovingSegmentGetBlock_(seg, nonmovingSegmentLogBlockSize(seg), i);
}

// Get the segment in which a closure resides. Assumes that the pointer points
// into the non-moving heap.
INLINE_HEADER struct NonmovingSegment *nonmovingGetSegment_unchecked(StgPtr p)
{
    const uintptr_t mask = ~NONMOVING_SEGMENT_MASK;
    return (struct NonmovingSegment *) (((uintptr_t) p) & mask);
}
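
// Example (illustrative addresses): with NONMOVING_SEGMENT_BITS == 15, a
// pointer 0x42009ab8 into a segment masks to the segment base 0x42008000
// (the low 15 bits are cleared). This relies on segments being aligned to
// NONMOVING_SEGMENT_SIZE.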

INLINE_HEADER bool nonmovingIsInSegment(StgPtr p)
{
    bdescr *bd = Bdescr(p);
    return HEAP_ALLOCED_GC(p) &&
        (bd->flags & BF_NONMOVING) &&
        !(bd->flags & BF_LARGE);
}

INLINE_HEADER struct NonmovingSegment *nonmovingGetSegment(StgPtr p)
{
    ASSERT(nonmovingIsInSegment(p));
    return nonmovingGetSegment_unchecked(p);
}

INLINE_HEADER nonmoving_block_idx nonmovingGetBlockIdx(StgPtr p)
{
    struct NonmovingSegment *seg = nonmovingGetSegment(p);
    ptrdiff_t blk0 = (ptrdiff_t)nonmovingSegmentGetBlock(seg, 0);
    ptrdiff_t offset = (ptrdiff_t)p - blk0;
    return (nonmoving_block_idx) (offset >> nonmovingSegmentLogBlockSize(seg));
}

// TODO: Eliminate this
extern uint8_t nonmovingMarkEpoch;

INLINE_HEADER void nonmovingSetMark(struct NonmovingSegment *seg, nonmoving_block_idx i)
{
    RELAXED_STORE(&seg->bitmap[i], nonmovingMarkEpoch);
}

INLINE_HEADER uint8_t nonmovingGetMark(struct NonmovingSegment *seg, nonmoving_block_idx i)
{
    return RELAXED_LOAD(&seg->bitmap[i]);
}

INLINE_HEADER void nonmovingSetClosureMark(StgPtr p)
{
    nonmovingSetMark(nonmovingGetSegment(p), nonmovingGetBlockIdx(p));
}

INLINE_HEADER uint8_t nonmovingGetClosureMark(StgPtr p)
{
    struct NonmovingSegment *seg = nonmovingGetSegment(p);
    nonmoving_block_idx blk_idx = nonmovingGetBlockIdx(p);
    return nonmovingGetMark(seg, blk_idx);
}

/* Was the given closure marked this major GC cycle? */
INLINE_HEADER bool nonmovingClosureMarkedThisCycle(StgPtr p)
{
    return nonmovingGetClosureMark(p) == nonmovingMarkEpoch;
}
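
// N.B. A bitmap entry counts as marked only if it equals the current mark
// epoch; stale entries written by an earlier cycle therefore read as unmarked
// without the bitmap having to be cleared first (a sketch of the intent; see
// the collector sources for the authoritative scheme).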

// Can be called during a major collection to determine whether a particular
// segment is in the set of segments that will be swept this collection cycle.
INLINE_HEADER bool nonmovingSegmentBeingSwept(struct NonmovingSegment *seg)
{
    struct NonmovingSegmentInfo *seginfo = nonmovingSegmentInfo(seg);
    unsigned int n = nonmovingBlockCountFromSize(seginfo->log_block_size);
    return seginfo->next_free_snap >= n;
}

// Can be called during a major collection to determine whether a particular
// closure lives in a segment that will be swept this collection cycle.
// Note that this returns true for both large and normal objects.
INLINE_HEADER bool nonmovingClosureBeingSwept(StgClosure *p)
{
    bdescr *bd = Bdescr((StgPtr) p);
    if (HEAP_ALLOCED_GC(p)) {
        if (bd->flags & BF_NONMOVING_SWEEPING) {
            return true;
        } else if (bd->flags & BF_NONMOVING) {
            struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);
            return nonmovingSegmentBeingSwept(seg);
        } else {
            // outside of the nonmoving heap
            return false;
        }
    } else {
        // a static object
        return true;
    }
}

INLINE_HEADER bool isNonmovingClosure(StgClosure *p)
{
    return RtsFlags.GcFlags.useNonmoving && (!HEAP_ALLOCED_GC(p) || Bdescr((P_)p)->flags & BF_NONMOVING);
}

#if defined(DEBUG)

void nonmovingPrintSegment(struct NonmovingSegment *seg);
void nonmovingPrintAllocator(struct NonmovingAllocator *alloc);
void locate_object(P_ obj);
void nonmovingPrintSweepList(void);
// Check whether the object is on one of the non-moving heap's mut_lists
void check_in_mut_list(StgClosure *p);
void print_block_list(bdescr *bd);
void print_thread_list(StgTSO* tso);

#endif

RTS_PRIVATE void clear_segment(struct NonmovingSegment*);

RTS_PRIVATE void clear_segment_free_blocks(struct NonmovingSegment*);

#include "EndPrivate.h"

#endif // CMINUSMINUS