path: root/rts/Weak.c
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-1999
 *
 * Weak pointers / finalizers
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "RtsAPI.h"

#include "RtsUtils.h"
#include "Weak.h"
#include "Schedule.h"
#include "Prelude.h"
#include "ThreadLabels.h"
#include "Trace.h"

// List of dead weak pointers collected by the last GC
static StgWeak *finalizer_list = NULL;

// Count of the above list.
static uint32_t n_finalizers = 0;

void
runCFinalizers(StgCFinalizerList *list)
{
    StgCFinalizerList *head;
    for (head = list;
        (StgClosure *)head != &stg_NO_FINALIZER_closure;
        head = (StgCFinalizerList *)head->link)
    {
        if (head->flag)
            ((void (*)(void *, void *))head->fptr)(head->eptr, head->ptr);
        else
            ((void (*)(void *))head->fptr)(head->ptr);
    }
}
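
/* A hedged usage sketch, not part of the RTS: the two callback shapes
 * dispatched on `flag` above correspond to C finalizers registered with
 * and without an extra environment pointer. With flag == 0 the callback
 * receives just `ptr`; with flag == 1 it receives `eptr` followed by
 * `ptr`. Both function names below are hypothetical:
 *
 *     static void finalize_plain(void *ptr)          { free(ptr); }
 *     static void finalize_env(void *env, void *ptr) { pool_free(env, ptr); }
 *
 * (pool_free is likewise a made-up helper, standing in for any cleanup
 * that needs the environment pointer.)
 */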

void
runAllCFinalizers(StgWeak *list)
{
    StgWeak *w;
    Task *task;

    task = myTask();
    if (task != NULL) {
        task->running_finalizers = true;
    }

    for (w = list; w; w = w->link) {
        // We need to filter out DEAD_WEAK objects, because the list is not
        // guaranteed to be free of them when we are shutting down: they are
        // only filtered out during a GC of the generation they belong to.
        // If no major GC happens between the time a finalizer for an object
        // in the oldest generation is run manually (via finalizeWeak#) and
        // shutdown, we would otherwise run the same finalizer twice.
        // See #7170.
        const StgInfoTable *winfo = ACQUIRE_LOAD(&w->header.info);
        if (winfo != &stg_DEAD_WEAK_info) {
            runCFinalizers((StgCFinalizerList *)w->cfinalizers);
        }
    }

    if (task != NULL) {
        task->running_finalizers = false;
    }
}
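
/* Usage sketch for the function above, under the assumption that it mirrors
 * the RTS shutdown path (hs_exit_() in RtsStartup.c), where every
 * generation's weak pointer list is flushed so that no C finalizer is lost
 * on exit:
 *
 *     uint32_t g;
 *     for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
 *         runAllCFinalizers(generations[g].weak_ptr_list);
 *     }
 */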

/*
 * scheduleFinalizers() is called on the list of weak pointers found
 * to be dead after a garbage collection.  It overwrites each object
 * with DEAD_WEAK, and creates a new thread to run the pending finalizers.
 *
 * This function is called just after GC.  The weak pointers on the
 * argument list are those whose keys were found to be not reachable,
 * however the value and finalizer fields have by now been marked live.
 * The weak pointer object itself may not be alive - i.e. we may be
 * looking at either an object in from-space or one in to-space.  It
 * doesn't really matter either way.
 *
 * Pre-condition: sched_mutex _not_ held.
 */

void
scheduleFinalizers(Capability *cap, StgWeak *list)
{
    StgWeak *w;
    StgTSO *t;
    StgMutArrPtrs *arr;
    StgWord size;
    uint32_t n, i;

    // n_finalizers is not necessarily zero under the non-moving collector,
    // because it does not wait for the pending list to be consumed (by
    // doIdleGCWork()) before appending more finalizers to it.
    ASSERT(RtsFlags.GcFlags.useNonmoving || SEQ_CST_LOAD(&n_finalizers) == 0);

    // Append the new list to finalizer_list. TODO: perhaps cache the tail of
    // the list for a faster append. NOTE: we must append `list` onto
    // finalizer_list and not the other way around; otherwise the loops below,
    // which traverse `list`, would revisit weaks already processed by an
    // earlier call.
    StgWeak **tl = &finalizer_list;
    while (*tl) {
        tl = &(*tl)->link;
    }
    SEQ_CST_STORE(tl, list);

    // Traverse the list and
    //  * count the number of Haskell finalizers
    //  * overwrite all the weak pointers with DEAD_WEAK
    n = 0;
    i = 0;
    for (w = list; w; w = w->link) {
        // Better not be a DEAD_WEAK at this stage; the garbage
        // collector removes DEAD_WEAKs from the weak pointer list.
        ASSERT(w->header.info != &stg_DEAD_WEAK_info);

        if (w->finalizer != &stg_NO_FINALIZER_closure) {
            n++;
        }

        // Remember the length of the list, for runSomeFinalizers() below
        i++;

#if defined(PROFILING)
        // A weak pointer is inherently used, so we do not need to call
        // LDV_recordDead().
        //
        // Furthermore, when PROFILING is turned on, dead weak
        // pointers are exactly as large as weak pointers, so there is
        // no need to fill the slop, either.  See stg_DEAD_WEAK_info
        // in StgMiscClosures.cmm.
#endif

        // We must overwrite the header with DEAD_WEAK, so that if
        // there's a later call to finalizeWeak# on this weak pointer,
        // we don't run the finalizer again.
        SET_HDR(w, &stg_DEAD_WEAK_info, w->header.prof.ccs);
    }

    SEQ_CST_ADD(&n_finalizers, i);

    // No Haskell finalizers to run?
    if (n == 0) return;

    debugTrace(DEBUG_weak, "weak: batching %d finalizers", n);

    size = n + mutArrPtrsCardTableSize(n);
    arr = (StgMutArrPtrs *)allocate(cap, sizeofW(StgMutArrPtrs) + size);
    TICK_ALLOC_PRIM(sizeofW(StgMutArrPtrs), n, 0);
    // No write barrier needed here; this array is only going to be
    // referred to by this core.
    SET_HDR(arr, &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info, CCS_SYSTEM);
    arr->ptrs = n;
    arr->size = size;

    n = 0;
    for (w = list; w; w = w->link) {
        if (w->finalizer != &stg_NO_FINALIZER_closure) {
            arr->payload[n] = w->finalizer;
            n++;
        }
    }
    // Fill in the card table: set all the cards to 1 (each payload word
    // after the n finalizers is set to all ones).
    for (i = n; i < size; i++) {
        arr->payload[i] = (StgClosure *)(W_)(-1);
    }

    t = createIOThread(cap,
                       RtsFlags.GcFlags.initialStkSize,
                       rts_apply(cap,
                           rts_apply(cap,
                               (StgClosure *)runFinalizerBatch_closure,
                               rts_mkInt(cap,n)),
                           (StgClosure *)arr)
        );

    scheduleThread(cap,t);
    labelThread(cap, t, "weak finalizer thread");
}
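
/* Worked example of the array sizing above, assuming the usual card size of
 * 2^MUT_ARR_PTRS_CARD_BITS = 128 elements per card byte: with n = 300
 * finalizers we need ceil(300/128) = 3 card bytes, which round up to 1 word
 * on a 64-bit machine, so size = 300 + 1 = 301. payload[0..299] hold the
 * finalizer closures and payload[300] is the card table, filled with
 * all-ones words by the loop above.
 */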

/* -----------------------------------------------------------------------------
   Incrementally running C finalizers

   The GC detects all the dead weak pointers, but we don't want to run
   their C finalizers during the GC itself, because that lengthens the
   time that the runtime is paused.

   What options are there?

   1. Parallelise running the C finalizers across the GC threads
      - doesn't solve the pause problem, just reduces it (maybe by a lot)

   2. Make a Haskell thread to run the C finalizers, like we do for
      Haskell finalizers.
      + scheduling is handled for us
      - no guarantee that we'll process finalizers in a timely manner

   3. Run finalizers when any capability is idle.
      + reduces pause to 0
      - requires scheduler modifications
      - if the runtime is busy, finalizers wait until the next GC

   4. like (3), but also run finalizers incrementally between GCs.
      - reduces the delay to run finalizers compared with (3)

   For now we do (3). It would be easy to do (4) later by adding a
   call to doIdleGCWork() in the scheduler loop, but I haven't found
   that necessary so far.

   -------------------------------------------------------------------------- */
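
/* A hedged sketch of what (4) might look like; this is hypothetical, not
 * what the scheduler does today:
 *
 *     // in the scheduler loop, when the run queue goes quiet:
 *     if (doIdleGCWork(cap, false)) {
 *         // more finalizers remain; they will be drained in chunks on
 *         // subsequent passes, so we never pause here for long
 *     }
 */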

// Run this many finalizers before returning from
// runSomeFinalizers(). This is so that we only tie up the capability
// for a short time, and respond quickly if new work becomes
// available.
static const int32_t finalizer_chunk = 100;

// non-zero if a thread is already in runSomeFinalizers(). This
// protects the globals finalizer_list and n_finalizers.
static volatile StgWord finalizer_lock = 0;

//
// Run some C finalizers.  Returns true if there's more work to do.
//
bool runSomeFinalizers(bool all)
{
    if (RELAXED_LOAD(&n_finalizers) == 0)
        return false;

    if (cas(&finalizer_lock, 0, 1) != 0) {
        // another capability is doing the work, it's safe to say
        // there's nothing to do, because the thread already in
        // runSomeFinalizers() will call in again.
        return false;
    }

    debugTrace(DEBUG_sched, "running C finalizers, %d remaining", n_finalizers);

    Task *task = myTask();
    if (task != NULL) {
        task->running_finalizers = true;
    }

    StgWeak *w = finalizer_list;
    int32_t count = 0;
    while (w != NULL) {
        runCFinalizers((StgCFinalizerList *)w->cfinalizers);
        w = w->link;
        ++count;
        if (!all && count >= finalizer_chunk) break;
    }

    RELAXED_STORE(&finalizer_list, w);
    SEQ_CST_ADD(&n_finalizers, -count);

    if (task != NULL) {
        task->running_finalizers = false;
    }

    debugTrace(DEBUG_sched, "ran %d C finalizers", count);
    bool ret = n_finalizers != 0;
    RELEASE_STORE(&finalizer_lock, 0);
    return ret;
}
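
/* Caller's-eye sketch of the two modes, assuming idle capabilities call in
 * with all=false and shutdown drains with all=true:
 *
 *     // idle path: run at most finalizer_chunk finalizers per call,
 *     // looping only while the capability has nothing better to do
 *     while (runSomeFinalizers(false)) {
 *         if (capabilityHasWork(cap)) break;   // hypothetical check
 *     }
 *
 *     // shutdown path: run everything that remains
 *     runSomeFinalizers(true);
 */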