path: root/psi/ireclaim.c
/* Copyright (C) 2001-2023 Artifex Software, Inc.
   All Rights Reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied,
   modified or distributed except as expressly authorized under the terms
   of the license contained in the file LICENSE in this distribution.

   Refer to licensing information at http://www.artifex.com or contact
   Artifex Software, Inc.,  39 Mesa Street, Suite 108A, San Francisco,
   CA 94129, USA, for further information.
*/


/* Interpreter's interface to garbage collector */
#include "ghost.h"
#include "ierrors.h"
#include "gsstruct.h"
#include "iastate.h"
#include "icontext.h"
#include "interp.h"
#include "isave.h"		/* for isstate.h */
#include "isstate.h"		/* for mem->saved->state */
#include "dstack.h"		/* for dsbot, dsp, dict_set_top */
#include "estack.h"		/* for esbot, esp */
#include "ostack.h"		/* for osbot, osp */
#include "opdef.h"		/* for defining init procedure */
#include "store.h"		/* for make_array */

/* Import preparation and cleanup routines. */
extern void ialloc_gc_prepare(gs_ref_memory_t *);

/* Forward references */
static int gs_vmreclaim(gs_dual_memory_t *, bool);

/* Initialize the GC hook in the allocator. */
static int ireclaim(gs_dual_memory_t *, int);
static int
ireclaim_init(i_ctx_t *i_ctx_p)
{
    gs_imemory.reclaim = ireclaim;
    return 0;
}

/* GC hook called when the allocator signals a GC is needed (space = -1), */
/* or for vmreclaim (space = the space to collect). */
static int
ireclaim(gs_dual_memory_t * dmem, int space)
{
    bool global;
    gs_ref_memory_t *mem = NULL;
    int code;

    if (space < 0) {
        /* Determine which allocator exceeded the limit. */
        int i;

        for (i = 0; i < countof(dmem->spaces_indexed); i++) {
            mem = dmem->spaces_indexed[i];
            if (mem == 0)
                continue;
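            /* Stop at the first space whose GC-able allocator, or its
               stable (non-GC) counterpart, has a pending request. */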
            if (mem->gc_status.requested > 0 ||
                ((gs_ref_memory_t *)mem->stable_memory)->gc_status.requested > 0
                )
                break;
        }
        if (!mem) {
            mem = dmem->space_global; /* just in case */
        }
    } else {
        mem = dmem->spaces_indexed[space >> r_space_shift];
    }
    if_debug3m('0', (gs_memory_t *)mem, "[0]GC called, space=%d, requestor=%d, requested=%ld\n",
               space, mem->space, (long)mem->gc_status.requested);
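    /* Collect all VM spaces unless the request came from local VM,
       in which case only local VM is collected. */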
    global = mem->space != avm_local;
    /* Since dmem may move, reset the request now. */
    ialloc_reset_requested(dmem);
    code = gs_vmreclaim(dmem, global);
    if (code < 0)
        return code;
    ialloc_set_limit(mem);
    if (space < 0) {
        gs_memory_status_t stats;
        size_t allocated;

        /* If the amount still allocated after the GC is complete */
        /* exceeds the max_vm setting, then return a VMerror.     */
        gs_memory_status((gs_memory_t *) mem, &stats);
        allocated = stats.allocated;
        if (mem->stable_memory != (gs_memory_t *)mem) {
            gs_memory_status(mem->stable_memory, &stats);
            allocated += stats.allocated;
        }
        if (allocated >= mem->gc_status.max_vm) {
            /* We can't satisfy this request within max_vm. */
            return_error(gs_error_VMerror);
        }
    }
    return 0;
}

/* Interpreter entry to garbage collector. */
static int
gs_vmreclaim(gs_dual_memory_t *dmem, bool global)
{
    /* HACK: we know the gs_dual_memory_t is embedded in a context state. */
    i_ctx_t *i_ctx_p =
        (i_ctx_t *)((char *)dmem - offset_of(i_ctx_t, memory));
    gs_ref_memory_t *lmem = dmem->space_local;
    int code = context_state_store(i_ctx_p);
    gs_ref_memory_t *memories[5];
    gs_ref_memory_t *mem;
    int nmem, i;

    if (code < 0)
        return code;

    memories[0] = dmem->space_system;
    memories[1] = mem = dmem->space_global;
    nmem = 2;
    if (lmem != dmem->space_global)
        memories[nmem++] = lmem;
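    /* Add each allocator's stable (non-GC) counterpart to the list
       when it is a distinct allocator. */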
    for (i = nmem; --i >= 0;) {
        mem = memories[i];
        if (mem->stable_memory != (gs_memory_t *)mem)
            memories[nmem++] = (gs_ref_memory_t *)mem->stable_memory;
    }

    /****** ABORT IF code < 0 ******/
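    /* Close the currently open clump of each allocator before collecting. */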
    for (i = nmem; --i >= 0; )
        alloc_close_clump(memories[i]);

    /* Prune the file list so it won't retain potentially collectible */
    /* files. */

    for (i = (global ? i_vm_system : i_vm_local);
         i < countof(dmem->spaces_indexed);
         ++i
         ) {
        gs_ref_memory_t *mem = dmem->spaces_indexed[i];

        /* Always safe to subtract 1 from i here, as i is always at
         * least i_vm_system (1) or i_vm_local (2). */
        if (mem == 0 || (mem == dmem->spaces_indexed[i - 1]))
            continue;
        if (mem->stable_memory != (gs_memory_t *)mem)
            ialloc_gc_prepare((gs_ref_memory_t *)mem->stable_memory);
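        /* Prepare this allocator and every saved state on its save chain. */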
        for (;; mem = &mem->saved->state) {
            ialloc_gc_prepare(mem);
            if (mem->saved == 0)
                break;
        }
    }

    /* Do the actual collection. */

    {
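        /* Register the context state as a GC root so it is traced and,
           if relocated, can be recovered from ctxp afterwards. */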
        void *ctxp = i_ctx_p;
        gs_gc_root_t context_root, *r = &context_root;

        gs_register_struct_root((gs_memory_t *)lmem, &r,
                                &ctxp, "i_ctx_p root");
        GS_RECLAIM(&dmem->spaces, global);
        gs_unregister_root((gs_memory_t *)lmem, r, "i_ctx_p root");
        i_ctx_p = ctxp;
        dmem = &i_ctx_p->memory;
    }

    /* Update caches not handled by context_state_load. */
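    /* systemdict is the bottommost entry on the dictionary stack;
       refresh the cached ref, since the GC may have moved it. */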

    *systemdict = *ref_stack_index(&d_stack, ref_stack_count(&d_stack) - 1);

    /* Update the cached value pointers in names. */

    dicts_gc_cleanup();

    /* Reopen the active clumps. */

    for (i = 0; i < nmem; ++i)
        alloc_open_clump(memories[i]);

    /* Reload the context state.  Note this should be done
       AFTER the clumps are reopened, since the context state
       load could do allocations that must remain.
       If it were done while the clumps were still closed,
       we would lose those allocations when the clumps were reopened. */

    code = context_state_load(i_ctx_p);
    return code;
}

/* ------ Initialization procedure ------ */

const op_def ireclaim_l2_op_defs[] =
{
    op_def_end(ireclaim_init)
};