summaryrefslogtreecommitdiff
path: root/psi
diff options
context:
space:
mode:
author    Robin Watts <robin.watts@artifex.com>  2016-04-22 18:22:30 +0100
committer Robin Watts <robin.watts@artifex.com>  2016-04-22 18:47:23 +0100
commit    15ee3fb57b660a796f8ac40c8784cfb165405336 (patch)
tree      c164e343b6c654d14953fc1561fd50fee73b4f38 /psi
parent    e749f3d65d203edbc7c140ec8fd76b66805d27ff (diff)
download  ghostpdl-15ee3fb57b660a796f8ac40c8784cfb165405336.tar.gz
Change standard allocator chunks to clumps.
We are in the confusing position of having a standard allocator that allocates in chunks, along with a chunked allocator. To avoid this confusing clash of terminology, we do some global renames to change the standard allocator to allocate in 'clumps' instead.
Diffstat (limited to 'psi')
-rw-r--r--  psi/ialloc.c    12
-rw-r--r--  psi/igc.c      180
-rw-r--r--  psi/igc.h        8
-rw-r--r--  psi/igcstr.c    42
-rw-r--r--  psi/igcstr.h    10
-rw-r--r--  psi/ilocate.c   84
-rw-r--r--  psi/ireclaim.c  12
-rw-r--r--  psi/isave.c    134
-rw-r--r--  psi/zcontext.c   8
9 files changed, 245 insertions, 245 deletions
diff --git a/psi/ialloc.c b/psi/ialloc.c
index 6e6002580..ef1e8d34a 100644
--- a/psi/ialloc.c
+++ b/psi/ialloc.c
@@ -186,7 +186,7 @@ gs_alloc_ref_array(gs_ref_memory_t * mem, ref * parr, uint attrs,
* - Large chunk: pcc unchanged, end != cc.cbot.
* - New chunk: pcc changed.
*/
- chunk_t *pcc = mem->pcc;
+ clump_t *pcc = mem->pcc;
ref *end;
alloc_change_t *cp = 0;
int code = 0;
@@ -213,11 +213,11 @@ gs_alloc_ref_array(gs_ref_memory_t * mem, ref * parr, uint attrs,
/* Large chunk. */
/* This happens only for very large arrays, */
/* so it doesn't need to be cheap. */
- chunk_locator_t cl;
+ clump_locator_t cl;
cl.memory = mem;
cl.cp = mem->root;
- chunk_locate_ptr(obj, &cl);
+ clump_locate_ptr(obj, &cl);
cl.cp->has_refs = true;
}
if (cp) {
@@ -309,11 +309,11 @@ gs_free_ref_array(gs_ref_memory_t * mem, ref * parr, client_name_t cname)
/* See if this array has a chunk all to itself. */
/* We only make this check when freeing very large objects, */
/* so it doesn't need to be cheap. */
- chunk_locator_t cl;
+ clump_locator_t cl;
cl.memory = mem;
cl.cp = mem->root;
- if (chunk_locate_ptr(obj, &cl) &&
+ if (clump_locate_ptr(obj, &cl) &&
obj == (ref *) ((obj_header_t *) (cl.cp->cbase) + 1) &&
(byte *) (obj + (num_refs + 1)) == cl.cp->cend
) {
@@ -323,7 +323,7 @@ gs_free_ref_array(gs_ref_memory_t * mem, ref * parr, client_name_t cname)
num_refs, (ulong) obj);
if ((gs_memory_t *)mem != mem->stable_memory)
alloc_save_remove(mem, (ref_packed *)obj, "gs_free_ref_array");
- alloc_free_chunk(cl.cp, mem);
+ alloc_free_clump(cl.cp, mem);
return;
}
}
diff --git a/psi/igc.c b/psi/igc.c
index 04b57dd3f..c36bb3e8b 100644
--- a/psi/igc.c
+++ b/psi/igc.c
@@ -67,17 +67,17 @@ struct gc_mark_stack_s {
/* Forward references */
static void gc_init_mark_stack(gc_mark_stack *, uint);
-static void gc_objects_clear_marks(const gs_memory_t *mem, chunk_t *);
+static void gc_objects_clear_marks(const gs_memory_t *mem, clump_t *);
static void gc_unmark_names(name_table *, op_array_table *, op_array_table *);
static int gc_trace(gs_gc_root_t *, gc_state_t *, gc_mark_stack *);
-static int gc_rescan_chunk(chunk_t *, gc_state_t *, gc_mark_stack *);
-static int gc_trace_chunk(const gs_memory_t *mem, chunk_t *, gc_state_t *, gc_mark_stack *);
+static int gc_rescan_clump(clump_t *, gc_state_t *, gc_mark_stack *);
+static int gc_trace_clump(const gs_memory_t *mem, clump_t *, gc_state_t *, gc_mark_stack *);
static bool gc_trace_finish(gc_state_t *);
-static void gc_clear_reloc(chunk_t *);
-static void gc_objects_set_reloc(gc_state_t * gcst, chunk_t *);
-static void gc_do_reloc(chunk_t *, gs_ref_memory_t *, gc_state_t *);
-static void gc_objects_compact(chunk_t *, gc_state_t *);
-static void gc_free_empty_chunks(gs_ref_memory_t *);
+static void gc_clear_reloc(clump_t *);
+static void gc_objects_set_reloc(gc_state_t * gcst, clump_t *);
+static void gc_do_reloc(clump_t *, gs_ref_memory_t *, gc_state_t *);
+static void gc_objects_compact(clump_t *, gc_state_t *);
+static void gc_free_empty_clumps(gs_ref_memory_t *);
/* Forward references for pointer types */
static ptr_proc_unmark(ptr_struct_unmark);
@@ -167,7 +167,7 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
int min_collect_vm_space; /* min VM space to collect */
int ispace;
gs_ref_memory_t *mem;
- chunk_t *cp;
+ clump_t *cp;
gs_gc_root_t *rp;
gc_state_t state;
struct _msd {
@@ -176,7 +176,7 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
} ms_default;
gc_mark_stack *mark_stack = &ms_default.stack;
const gs_memory_t *cmem;
- chunk_splay_walker sw;
+ clump_splay_walker sw;
/* Optionally force global GC for debugging. */
@@ -211,14 +211,14 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
for (i = min_collect; i <= max_trace; ++i)
#define for_space_mems(i, mem)\
for (mem = space_memories[i]; mem != 0; mem = &mem->saved->state)
-#define for_mem_chunks(mem, cp, sw)\
- for (cp = chunk_splay_walk_init(sw, mem); cp != 0; cp = chunk_splay_walk_fwd(sw))
-#define for_space_chunks(i, mem, cp, sw)\
- for_space_mems(i, mem) for_mem_chunks(mem, cp, sw)
-#define for_chunks(i, n, mem, cp, sw)\
- for_spaces(i, n) for_space_chunks(i, mem, cp, sw)
-#define for_collected_chunks(i, mem, cp, sw)\
- for_collected_spaces(i) for_space_chunks(i, mem, cp, sw)
+#define for_mem_clumps(mem, cp, sw)\
+ for (cp = clump_splay_walk_init(sw, mem); cp != 0; cp = clump_splay_walk_fwd(sw))
+#define for_space_clumps(i, mem, cp, sw)\
+ for_space_mems(i, mem) for_mem_clumps(mem, cp, sw)
+#define for_clumps(i, n, mem, cp, sw)\
+ for_spaces(i, n) for_space_clumps(i, mem, cp, sw)
+#define for_collected_clumps(i, mem, cp, sw)\
+ for_collected_spaces(i) for_space_clumps(i, mem, cp, sw)
#define for_roots(i, n, mem, rp)\
for_spaces(i, n)\
for (mem = space_memories[i], rp = mem->roots; rp != 0; rp = rp->next)
@@ -263,15 +263,15 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
/* Clear marks in spaces to be collected. */
for_collected_spaces(ispace)
- for_space_chunks(ispace, mem, cp, &sw) {
+ for_space_clumps(ispace, mem, cp, &sw) {
gc_objects_clear_marks((const gs_memory_t *)mem, cp);
gc_strings_set_marks(cp, false);
}
- end_phase(state.heap,"clear chunk marks");
+ end_phase(state.heap,"clear clump marks");
/* Clear the marks of roots. We must do this explicitly, */
- /* since some roots are not in any chunk. */
+ /* since some roots are not in any clump. */
for_roots(ispace, max_trace, mem, rp) {
enum_ptr_t eptr;
@@ -301,7 +301,7 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
{
gc_mark_stack *end = mark_stack;
- for_chunks(ispace, max_trace, mem, cp, &sw) {
+ for_clumps(ispace, max_trace, mem, cp, &sw) {
uint avail = cp->ctop - cp->cbot;
if (avail >= sizeof(gc_mark_stack) + sizeof(ms_entry) *
@@ -338,18 +338,18 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
end_phase(state.heap,"mark");
- /* If this is a local GC, mark from non-local chunks. */
+ /* If this is a local GC, mark from non-local clumps. */
if (!global)
- for_chunks(ispace, min_collect - 1, mem, cp, &sw)
- more |= gc_trace_chunk((const gs_memory_t *)mem, cp, &state, mark_stack);
+ for_clumps(ispace, min_collect - 1, mem, cp, &sw)
+ more |= gc_trace_clump((const gs_memory_t *)mem, cp, &state, mark_stack);
/* Handle mark stack overflow. */
while (more < 0) { /* stack overflowed */
more = 0;
- for_chunks(ispace, max_trace, mem, cp, &sw)
- more |= gc_rescan_chunk(cp, &state, mark_stack);
+ for_clumps(ispace, max_trace, mem, cp, &sw)
+ more |= gc_rescan_clump(cp, &state, mark_stack);
}
end_phase(state.heap,"mark overflow");
@@ -398,17 +398,17 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
/* We have to clear the marks first, because we want the */
/* relocation to wind up as o_untraced, not o_unmarked. */
- for_chunks(ispace, min_collect - 1, mem, cp, &sw)
+ for_clumps(ispace, min_collect - 1, mem, cp, &sw)
gc_objects_clear_marks((const gs_memory_t *)mem, cp);
end_phase(state.heap,"post-clear marks");
- for_chunks(ispace, min_collect - 1, mem, cp, &sw)
+ for_clumps(ispace, min_collect - 1, mem, cp, &sw)
gc_clear_reloc(cp);
end_phase(state.heap,"clear reloc");
- /* Set the relocation of roots outside any chunk to o_untraced, */
+ /* Set the relocation of roots outside any clump to o_untraced, */
/* so we won't try to relocate pointers to them. */
/* (Currently, there aren't any.) */
@@ -425,7 +425,7 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
/* we are going to compact. Also finalize freed objects. */
state.cur_mem = (gs_memory_t *)mem;
- for_collected_chunks(ispace, mem, cp, &sw) {
+ for_collected_clumps(ispace, mem, cp, &sw) {
gc_objects_set_reloc(&state, cp);
gc_strings_set_reloc(cp);
}
@@ -443,13 +443,13 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
/* Relocate pointers. */
state.relocating_untraced = true;
- for_chunks(ispace, min_collect - 1, mem, cp, &sw)
+ for_clumps(ispace, min_collect - 1, mem, cp, &sw)
gc_do_reloc(cp, mem, &state);
state.relocating_untraced = false;
- for_collected_chunks(ispace, mem, cp, &sw)
+ for_collected_clumps(ispace, mem, cp, &sw)
gc_do_reloc(cp, mem, &state);
- end_phase(state.heap,"relocate chunks");
+ end_phase(state.heap,"relocate clumps");
for_roots(ispace, max_trace, mem, rp) {
if_debug3m('6', (const gs_memory_t *)mem,
@@ -474,11 +474,11 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
for_collected_spaces(ispace) {
for_space_mems(ispace, mem) {
- for_mem_chunks(mem, cp, &sw) {
- if_debug_chunk('6', (const gs_memory_t *)mem, "[6]compacting chunk", cp);
+ for_mem_clumps(mem, cp, &sw) {
+ if_debug_clump('6', (const gs_memory_t *)mem, "[6]compacting clump", cp);
gc_objects_compact(cp, &state);
gc_strings_compact(cp, cmem);
- if_debug_chunk('6', (const gs_memory_t *)mem, "[6]after compaction:", cp);
+ if_debug_clump('6', (const gs_memory_t *)mem, "[6]after compaction:", cp);
if (mem->pcc == cp)
mem->cc = *cp;
}
@@ -489,18 +489,18 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
end_phase(state.heap,"compact");
- /* Free empty chunks. */
+ /* Free empty clumps. */
for_collected_spaces(ispace) {
for_space_mems(ispace, mem) {
- gc_free_empty_chunks(mem);
+ gc_free_empty_clumps(mem);
}
}
- end_phase(state.heap,"free empty chunks");
+ end_phase(state.heap,"free empty clumps");
/*
- * Update previous_status to reflect any freed chunks,
+ * Update previous_status to reflect any freed clumps,
* and set inherited to the negative of allocated,
* so it has no effect. We must update previous_status by
* working back-to-front along the save chain, using pointer reversal.
@@ -602,12 +602,12 @@ ptr_name_index_unmark(enum_ptr_t *pep, gc_state_t * gcst)
/* Do nothing */
}
-/* Unmark the objects in a chunk. */
+/* Unmark the objects in a clump. */
static void
-gc_objects_clear_marks(const gs_memory_t *mem, chunk_t * cp)
+gc_objects_clear_marks(const gs_memory_t *mem, clump_t * cp)
{
- if_debug_chunk('6', mem, "[6]unmarking chunk", cp);
- SCAN_CHUNK_OBJECTS(cp)
+ if_debug_clump('6', mem, "[6]unmarking clump", cp);
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
struct_proc_clear_marks((*proc)) =
pre->o_type->clear_marks;
@@ -658,10 +658,10 @@ gc_init_mark_stack(gc_mark_stack * pms, uint count)
pms->entries[0].is_refs = false;
}
-/* Mark starting from all marked objects in the interval of a chunk */
+/* Mark starting from all marked objects in the interval of a clump */
/* needing rescanning. */
static int
-gc_rescan_chunk(chunk_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
+gc_rescan_clump(clump_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
{
byte *sbot = cp->rescan_bot;
byte *stop = cp->rescan_top;
@@ -673,10 +673,10 @@ gc_rescan_chunk(chunk_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
if (sbot > stop)
return 0;
root.p = &comp;
- if_debug_chunk('6', mem, "[6]rescanning chunk", cp);
+ if_debug_clump('6', mem, "[6]rescanning clump", cp);
cp->rescan_bot = cp->cend;
cp->rescan_top = cp->cbase;
- SCAN_CHUNK_OBJECTS(cp)
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
if ((byte *) (pre + 1) + size < sbot);
else if ((byte *) (pre + 1) > stop)
@@ -724,11 +724,11 @@ gc_rescan_chunk(chunk_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
return more;
}
-/* Mark starting from all the objects in a chunk. */
+/* Mark starting from all the objects in a clump. */
/* We assume that pstate->min_collect > avm_system, */
/* so we don't have to trace names. */
static int
-gc_trace_chunk(const gs_memory_t *mem, chunk_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
+gc_trace_clump(const gs_memory_t *mem, clump_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
{
gs_gc_root_t root;
void *comp;
@@ -736,8 +736,8 @@ gc_trace_chunk(const gs_memory_t *mem, chunk_t * cp, gc_state_t * pstate, gc_mar
int min_trace = pstate->min_collect;
root.p = &comp;
- if_debug_chunk('6', mem, "[6]marking from chunk", cp);
- SCAN_CHUNK_OBJECTS(cp)
+ if_debug_clump('6', mem, "[6]marking from clump", cp);
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
{
if_debug2m('7', mem, " [7]scanning/marking 0x%lx(%lu)\n",
@@ -1036,7 +1036,7 @@ gc_trace(gs_gc_root_t * rp, gc_state_t * pstate, gc_mark_stack * pmstack)
return new;
}
/* Link to, attempting to allocate if necessary, */
-/* another chunk of mark stack. */
+/* another clump of mark stack. */
static int
gc_extend_stack(gc_mark_stack * pms, gc_state_t * pstate)
{
@@ -1055,7 +1055,7 @@ gc_extend_stack(gc_mark_stack * pms, gc_state_t * pstate)
if (pms->next == 0) { /* The mark stack overflowed. */
ms_entry *sp = pms->entries + pms->count - 1;
byte *cptr = sp->ptr; /* container */
- chunk_t *cp = gc_locate(cptr, pstate);
+ clump_t *cp = gc_locate(cptr, pstate);
int new = 1;
if (cp == 0) { /* We were tracing outside collectible */
@@ -1137,27 +1137,27 @@ gc_trace_finish(gc_state_t * pstate)
/* ------ Relocation planning phase ------ */
-/* Initialize the relocation information in the chunk header. */
+/* Initialize the relocation information in the clump header. */
static void
-gc_init_reloc(chunk_t * cp)
+gc_init_reloc(clump_t * cp)
{
- chunk_head_t *chead = cp->chead;
+ clump_head_t *chead = cp->chead;
chead->dest = cp->cbase;
chead->free.o_back =
- offset_of(chunk_head_t, free) >> obj_back_shift;
+ offset_of(clump_head_t, free) >> obj_back_shift;
chead->free.o_size = sizeof(obj_header_t);
chead->free.o_nreloc = 0;
}
-/* Set marks and clear relocation for chunks that won't be compacted. */
+/* Set marks and clear relocation for clumps that won't be compacted. */
static void
-gc_clear_reloc(chunk_t * cp)
+gc_clear_reloc(clump_t * cp)
{
byte *pfree = (byte *) & cp->chead->free;
gc_init_reloc(cp);
- SCAN_CHUNK_OBJECTS(cp)
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
const struct_shared_procs_t *procs =
pre->o_type->shared;
@@ -1171,18 +1171,18 @@ gc_clear_reloc(chunk_t * cp)
gc_strings_clear_reloc(cp);
}
-/* Set the relocation for the objects in a chunk. */
-/* This will never be called for a chunk with any o_untraced objects. */
+/* Set the relocation for the objects in a clump. */
+/* This will never be called for a clump with any o_untraced objects. */
static void
-gc_objects_set_reloc(gc_state_t * gcst, chunk_t * cp)
+gc_objects_set_reloc(gc_state_t * gcst, clump_t * cp)
{
size_t reloc = 0;
- chunk_head_t *chead = cp->chead;
+ clump_head_t *chead = cp->chead;
byte *pfree = (byte *) & chead->free; /* most recent free object */
- if_debug_chunk('6', gcst->heap, "[6]setting reloc for chunk", cp);
+ if_debug_clump('6', gcst->heap, "[6]setting reloc for clump", cp);
gc_init_reloc(cp);
- SCAN_CHUNK_OBJECTS(cp)
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
struct_proc_finalize((*finalize));
const struct_shared_procs_t *procs =
@@ -1211,21 +1211,21 @@ gc_objects_set_reloc(gc_state_t * gcst, chunk_t * cp)
#ifdef DEBUG
if (reloc != 0) {
if_debug1m('6', gcst->heap, "[6]freed %u", (unsigned int)reloc);
- if_debug_chunk('6', gcst->heap, " in", cp);
+ if_debug_clump('6', gcst->heap, " in", cp);
}
#endif
}
/* ------ Relocation phase ------ */
-/* Relocate the pointers in all the objects in a chunk. */
+/* Relocate the pointers in all the objects in a clump. */
static void
-gc_do_reloc(chunk_t * cp, gs_ref_memory_t * mem, gc_state_t * pstate)
+gc_do_reloc(clump_t * cp, gs_ref_memory_t * mem, gc_state_t * pstate)
{
- chunk_head_t *chead = cp->chead;
+ clump_head_t *chead = cp->chead;
- if_debug_chunk('6', (const gs_memory_t *)mem, "[6]relocating in chunk", cp);
- SCAN_CHUNK_OBJECTS(cp)
+ if_debug_clump('6', (const gs_memory_t *)mem, "[6]relocating in clump", cp);
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
#ifdef DEBUG
pstate->container = cp;
@@ -1233,7 +1233,7 @@ gc_do_reloc(chunk_t * cp, gs_ref_memory_t * mem, gc_state_t * pstate)
/* We need to relocate the pointers in an object iff */
/* it is o_untraced, or it is a useful object. */
/* An object is free iff its back pointer points to */
- /* the chunk_head structure. */
+ /* the clump_head structure. */
if (o_is_untraced(pre) ||
pre->o_back << obj_back_shift != (byte *) pre - (byte *) chead
) {
@@ -1285,7 +1285,7 @@ igc_reloc_struct_ptr(const void /*obj_header_t */ *obj, gc_state_t * gcst)
else {
#ifdef DEBUG
/* Do some sanity checking. */
- chunk_t *cp = gcst->container;
+ clump_t *cp = gcst->container;
if (cp != 0 && cp->cbase <= (byte *)obj && (byte *)obj <cp->ctop) {
if (back > (cp->ctop - cp->cbase) >> obj_back_shift) {
@@ -1294,14 +1294,14 @@ igc_reloc_struct_ptr(const void /*obj_header_t */ *obj, gc_state_t * gcst)
gs_abort(NULL);
}
} else {
- /* Pointed to unknown chunk. Can't check it, sorry. */
+ /* Pointed to unknown clump. Can't check it, sorry. */
}
#endif
{
const obj_header_t *pfree = (const obj_header_t *)
((const char *)(optr - 1) -
(back << obj_back_shift));
- const chunk_head_t *chead = (const chunk_head_t *)
+ const clump_head_t *chead = (const clump_head_t *)
((const char *)pfree -
(pfree->o_back << obj_back_shift));
@@ -1322,19 +1322,19 @@ igc_reloc_struct_ptr(const void /*obj_header_t */ *obj, gc_state_t * gcst)
/* ------ Compaction phase ------ */
-/* Compact the objects in a chunk. */
-/* This will never be called for a chunk with any o_untraced objects. */
+/* Compact the objects in a clump. */
+/* This will never be called for a clump with any o_untraced objects. */
static void
-gc_objects_compact(chunk_t * cp, gc_state_t * gcst)
+gc_objects_compact(clump_t * cp, gc_state_t * gcst)
{
- chunk_head_t *chead = cp->chead;
+ clump_head_t *chead = cp->chead;
obj_header_t *dpre = (obj_header_t *) chead->dest;
const gs_memory_t *cmem = gcst->spaces.memories.named.system->stable_memory;
- SCAN_CHUNK_OBJECTS(cp)
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
/* An object is free iff its back pointer points to */
- /* the chunk_head structure. */
+ /* the clump_head structure. */
if (pre->o_back << obj_back_shift != (byte *) pre - (byte *) chead) {
const struct_shared_procs_t *procs = pre->o_type->shared;
@@ -1354,7 +1354,7 @@ gc_objects_compact(chunk_t * cp, gc_state_t * gcst)
}
END_OBJECTS_SCAN
if (cp->outer == 0 && chead->dest != cp->cbase)
- dpre = (obj_header_t *) cp->cbase; /* compacted this chunk into another */
+ dpre = (obj_header_t *) cp->cbase; /* compacted this clump into another */
gs_alloc_fill(dpre, gs_alloc_fill_collected, cp->cbot - (byte *) dpre);
cp->cbot = (byte *) dpre;
cp->rcur = 0;
@@ -1364,27 +1364,27 @@ gc_objects_compact(chunk_t * cp, gc_state_t * gcst)
/* ------ Cleanup ------ */
static int
-free_if_empty(chunk_t *cp, void *arg)
+free_if_empty(clump_t *cp, void *arg)
{
gs_ref_memory_t * mem = (gs_ref_memory_t *)arg;
if (cp->cbot == cp->cbase && cp->ctop == cp->climit &&
cp->outer == 0 && cp->inner_count == 0)
{
- alloc_free_chunk(cp, mem);
+ alloc_free_clump(cp, mem);
if (mem->pcc == cp)
mem->pcc = 0;
}
return SPLAY_APP_CONTINUE;
}
-/* Free empty chunks. */
+/* Free empty clumps. */
static void
-gc_free_empty_chunks(gs_ref_memory_t * mem)
+gc_free_empty_clumps(gs_ref_memory_t * mem)
{
/* NOTE: Not in reverse order any more, so potentially
* not quite as good for crap allocators. */
- chunk_splay_app(mem->root, mem, free_if_empty, mem);
+ clump_splay_app(mem->root, mem, free_if_empty, mem);
}
const gs_memory_t * gcst_get_memory_ptr(gc_state_t *gcst)
diff --git a/psi/igc.h b/psi/igc.h
index a01e73796..807053ccc 100644
--- a/psi/igc.h
+++ b/psi/igc.h
@@ -59,7 +59,7 @@ typedef struct name_table_s name_table;
#endif
struct gc_state_s {
const gc_procs_with_refs_t *procs; /* must be first */
- chunk_locator_t loc;
+ clump_locator_t loc;
vm_spaces spaces;
int min_collect; /* avm_space */
bool relocating_untraced; /* if true, we're relocating */
@@ -68,7 +68,7 @@ struct gc_state_s {
name_table *ntable; /* (implicitly referenced by names) */
gs_memory_t *cur_mem;
#ifdef DEBUG
- chunk_t *container;
+ clump_t *container;
#endif
};
@@ -79,8 +79,8 @@ ptr_proc_mark(ptr_ref_mark);
/* Exported by ilocate.c for igc.c */
void ialloc_validate_memory(const gs_ref_memory_t *, gc_state_t *);
-void ialloc_validate_chunk(const chunk_t *, gc_state_t *);
-void ialloc_validate_object(const obj_header_t *, const chunk_t *,
+void ialloc_validate_clump(const clump_t *, gc_state_t *);
+void ialloc_validate_object(const obj_header_t *, const clump_t *,
gc_state_t *);
/* Exported by igc.c for ilocate.c */
diff --git a/psi/igcstr.c b/psi/igcstr.c
index 1789bebb9..0694c7a98 100644
--- a/psi/igcstr.c
+++ b/psi/igcstr.c
@@ -24,11 +24,11 @@
#include "igc.h"
/* Forward references */
-static bool gc_mark_string(const byte *, uint, bool, const chunk_t *);
+static bool gc_mark_string(const byte *, uint, bool, const clump_t *);
-/* (Un)mark the strings in a chunk. */
+/* (Un)mark the strings in a clump. */
void
-gc_strings_set_marks(chunk_t * cp, bool mark)
+gc_strings_set_marks(clump_t * cp, bool mark)
{
if (cp->smark != 0) {
if_debug3('6', "[6]clearing string marks 0x%lx[%u] to %d\n",
@@ -59,9 +59,9 @@ typedef string_mark_unit bword;
# define bword_swap_bytes(m) DO_NOTHING
#endif
-/* (Un)mark a string in a known chunk. Return true iff any new marks. */
+/* (Un)mark a string in a known clump. Return true iff any new marks. */
static bool
-gc_mark_string(const byte * ptr, uint size, bool set, const chunk_t * cp)
+gc_mark_string(const byte * ptr, uint size, bool set, const clump_t * cp)
{
uint offset = (ptr - HDR_ID_OFFSET) - cp->sbase;
bword *bp = (bword *) (cp->smark + ((offset & -bword_bits) >> 3));
@@ -130,7 +130,7 @@ dmfwrite(const gs_memory_t *mem, const byte *ptr, uint count)
bool
gc_string_mark(const byte * ptr, uint size, bool set, gc_state_t * gcst)
{
- const chunk_t *cp;
+ const clump_t *cp;
bool marks;
if (size == 0)
@@ -138,12 +138,12 @@ gc_string_mark(const byte * ptr, uint size, bool set, gc_state_t * gcst)
#define dmprintstr(mem)\
dmputc(mem, '('); dmfwrite(mem, ptr - HDR_ID_OFFSET, min(size, 20));\
dmputs(mem, (size <= 20 ? ")" : "...)"))
- if (!(cp = gc_locate(ptr - HDR_ID_OFFSET, gcst))) { /* not in a chunk */
+ if (!(cp = gc_locate(ptr - HDR_ID_OFFSET, gcst))) { /* not in a clump */
#ifdef DEBUG
if (gs_debug_c('5')) {
dmlprintf2(gcst->heap, "[5]0x%lx[%u]", (ulong) ptr - HDR_ID_OFFSET, size);
dmprintstr(gcst->heap);
- dmputs(gcst->heap, " not in a chunk\n");
+ dmputs(gcst->heap, " not in a clump\n");
}
#endif
return false;
@@ -156,17 +156,17 @@ gc_string_mark(const byte * ptr, uint size, bool set, gc_state_t * gcst)
(ulong) ptr - HDR_ID_OFFSET, size, (ulong) cp->ctop, (ulong) cp->climit);
return false;
} else if (ptr + size > cp->climit) { /*
- * If this is the bottommost string in a chunk that has
- * an inner chunk, the string's starting address is both
- * cp->ctop of the outer chunk and cp->climit of the inner;
+ * If this is the bottommost string in a clump that has
+ * an inner clump, the string's starting address is both
+ * cp->ctop of the outer clump and cp->climit of the inner;
* gc_locate may incorrectly attribute the string to the
- * inner chunk because of this. This doesn't affect
+ * inner clump because of this. This doesn't affect
* marking or relocation, since the machinery for these
- * is all associated with the outermost chunk,
+ * is all associated with the outermost clump,
* but it can cause the validity check to fail.
* Check for this case now.
*/
- const chunk_t *scp = cp;
+ const clump_t *scp = cp;
while (ptr - HDR_ID_OFFSET == scp->climit && scp->outer != 0)
scp = scp->outer;
@@ -194,7 +194,7 @@ gc_string_mark(const byte * ptr, uint size, bool set, gc_state_t * gcst)
/* Clear the relocation for strings. */
/* This requires setting the marks. */
void
-gc_strings_clear_reloc(chunk_t * cp)
+gc_strings_clear_reloc(clump_t * cp)
{
if (cp->sreloc != 0) {
gc_strings_set_marks(cp, true);
@@ -218,11 +218,11 @@ static const byte count_zero_bits_table[256] =
#define byte_count_one_bits(byt)\
(uint)(8 - count_zero_bits_table[byt])
-/* Set the relocation for the strings in a chunk. */
+/* Set the relocation for the strings in a clump. */
/* The sreloc table stores the relocated offset from climit for */
/* the beginning of each block of string_data_quantum characters. */
void
-gc_strings_set_reloc(chunk_t * cp)
+gc_strings_set_reloc(clump_t * cp)
{
if (cp->sreloc != 0 && cp->smark != 0) {
byte *bot = cp->ctop;
@@ -280,7 +280,7 @@ void
igc_reloc_string(gs_string * sptr, gc_state_t * gcst)
{
byte *ptr;
- const chunk_t *cp;
+ const clump_t *cp;
uint offset;
uint reloc;
const byte *bitp;
@@ -293,7 +293,7 @@ igc_reloc_string(gs_string * sptr, gc_state_t * gcst)
ptr = sptr->data;
ptr -= HDR_ID_OFFSET;
- if (!(cp = gc_locate(ptr, gcst))) /* not in a chunk */
+ if (!(cp = gc_locate(ptr, gcst))) /* not in a clump */
return;
if (cp->sreloc == 0 || cp->smark == 0) /* not marking strings */
return;
@@ -339,9 +339,9 @@ igc_reloc_param_string(gs_param_string * sptr, gc_state_t * gcst)
}
}
-/* Compact the strings in a chunk. */
+/* Compact the strings in a clump. */
void
-gc_strings_compact(chunk_t * cp, const gs_memory_t *mem)
+gc_strings_compact(clump_t * cp, const gs_memory_t *mem)
{
if (cp->smark != 0) {
byte *hi = cp->climit;
diff --git a/psi/igcstr.h b/psi/igcstr.h
index 4bf0fbc93..c0f14c58d 100644
--- a/psi/igcstr.h
+++ b/psi/igcstr.h
@@ -20,14 +20,14 @@
# define igcstr_INCLUDED
/* Exported by ilocate.c for igcstr.c */
-chunk_t *gc_locate(const void *, gc_state_t *);
+clump_t *gc_locate(const void *, gc_state_t *);
/* Exported by igcstr.c for igc.c */
-void gc_strings_set_marks(chunk_t *, bool);
+void gc_strings_set_marks(clump_t *, bool);
bool gc_string_mark(const byte *, uint, bool, gc_state_t *);
-void gc_strings_clear_reloc(chunk_t *);
-void gc_strings_set_reloc(chunk_t *);
-void gc_strings_compact(chunk_t *, const gs_memory_t *);
+void gc_strings_clear_reloc(clump_t *);
+void gc_strings_set_reloc(clump_t *);
+void gc_strings_compact(clump_t *, const gs_memory_t *);
string_proc_reloc(igc_reloc_string);
const_string_proc_reloc(igc_reloc_const_string);
param_string_proc_reloc(igc_reloc_param_string);
diff --git a/psi/ilocate.c b/psi/ilocate.c
index 8fd3d7262..f5d41a4d6 100644
--- a/psi/ilocate.c
+++ b/psi/ilocate.c
@@ -32,23 +32,23 @@
#include "store.h"
#ifdef DEBUG
-static int do_validate_chunk(const chunk_t * cp, gc_state_t * gcst);
-static int do_validate_object(const obj_header_t * ptr, const chunk_t * cp,
+static int do_validate_clump(const clump_t * cp, gc_state_t * gcst);
+static int do_validate_object(const obj_header_t * ptr, const clump_t * cp,
gc_state_t * gcst);
#endif
/* ================ Locating ================ */
-/* Locate a pointer in the chunks of a space being collected. */
+/* Locate a pointer in the clumps of a space being collected. */
/* This is only used for string garbage collection and for debugging. */
-chunk_t *
+clump_t *
gc_locate(const void *ptr, gc_state_t * gcst)
{
const gs_ref_memory_t *mem;
const gs_ref_memory_t *other;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
mem = gcst->loc.memory;
@@ -62,7 +62,7 @@ gc_locate(const void *ptr, gc_state_t * gcst)
) {
gcst->loc.memory = other;
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
}
@@ -76,13 +76,13 @@ gc_locate(const void *ptr, gc_state_t * gcst)
gcst->loc.memory = other =
(mem->space == avm_local ? gcst->space_global : gcst->space_local);
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
/* Try its stable allocator. */
if (other->stable_memory != (const gs_memory_t *)other) {
gcst->loc.memory = (gs_ref_memory_t *)other->stable_memory;
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
gcst->loc.memory = other;
}
@@ -90,7 +90,7 @@ gc_locate(const void *ptr, gc_state_t * gcst)
while (gcst->loc.memory->saved != 0) {
gcst->loc.memory = &gcst->loc.memory->saved->state;
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
}
}
@@ -103,7 +103,7 @@ gc_locate(const void *ptr, gc_state_t * gcst)
if (mem != gcst->space_system) {
gcst->loc.memory = gcst->space_system;
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
}
@@ -119,7 +119,7 @@ gc_locate(const void *ptr, gc_state_t * gcst)
if (other->stable_memory != (const gs_memory_t *)other) {
gcst->loc.memory = (gs_ref_memory_t *)other->stable_memory;
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
}
gcst->loc.memory = other;
@@ -134,7 +134,7 @@ gc_locate(const void *ptr, gc_state_t * gcst)
for (;;) {
if (gcst->loc.memory != mem) { /* don't do twice */
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
}
if (gcst->loc.memory->saved == 0)
@@ -155,7 +155,7 @@ gc_locate(const void *ptr, gc_state_t * gcst)
/* Define the structure for temporarily saving allocator state. */
typedef struct alloc_temp_save_s {
- chunk_t cc;
+ clump_t cc;
uint rsize;
ref rlast;
} alloc_temp_save_t;
@@ -163,7 +163,7 @@ typedef struct alloc_temp_save_s {
static void
alloc_temp_save(alloc_temp_save_t *pats, gs_ref_memory_t *mem)
{
- chunk_t *pcc = mem->pcc;
+ clump_t *pcc = mem->pcc;
obj_header_t *rcur = mem->cc.rcur;
if (pcc != 0) {
@@ -182,7 +182,7 @@ alloc_temp_save(alloc_temp_save_t *pats, gs_ref_memory_t *mem)
static void
alloc_temp_restore(alloc_temp_save_t *pats, gs_ref_memory_t *mem)
{
- chunk_t *pcc = mem->pcc;
+ clump_t *pcc = mem->pcc;
obj_header_t *rcur = mem->cc.rcur;
if (rcur != 0) {
@@ -248,15 +248,15 @@ ialloc_validate_memory(const gs_ref_memory_t * mem, gc_state_t * gcst)
for (smem = mem, level = 0; smem != 0;
smem = &smem->saved->state, --level
) {
- chunk_splay_walker sw;
- const chunk_t *cp;
+ clump_splay_walker sw;
+ const clump_t *cp;
int i;
if_debug3m('6', (gs_memory_t *)mem, "[6]validating memory 0x%lx, space %d, level %d\n",
(ulong) mem, mem->space, level);
- /* Validate chunks. */
- for (cp = chunk_splay_walk_init(&sw, smem); cp != 0; cp = chunk_splay_walk_fwd(&sw))
- if (do_validate_chunk(cp, gcst)) {
+ /* Validate clumps. */
+ for (cp = clump_splay_walk_init(&sw, smem); cp != 0; cp = clump_splay_walk_fwd(&sw))
+ if (do_validate_clump(cp, gcst)) {
mlprintf3((gs_memory_t *)mem, "while validating memory 0x%lx, space %d, level %d\n",
(ulong) mem, mem->space, level);
gs_abort(gcst->heap);
@@ -290,13 +290,13 @@ ialloc_validate_memory(const gs_ref_memory_t * mem, gc_state_t * gcst)
/* Check the validity of an object's size. */
static inline bool
-object_size_valid(const obj_header_t * pre, uint size, const chunk_t * cp)
+object_size_valid(const obj_header_t * pre, uint size, const clump_t * cp)
{
return (pre->o_alone ? (const byte *)pre == cp->cbase :
size <= cp->ctop - (const byte *)(pre + 1));
}
-/* Validate all the objects in a chunk. */
+/* Validate all the objects in a clump. */
#if IGC_PTR_STABILITY_CHECK
void ialloc_validate_pointer_stability(const obj_header_t * ptr_from,
const obj_header_t * ptr_to);
@@ -307,21 +307,21 @@ static int ialloc_validate_ref(const ref *, gc_state_t *);
static int ialloc_validate_ref_packed(const ref_packed *, gc_state_t *);
#endif
static int
-do_validate_chunk(const chunk_t * cp, gc_state_t * gcst)
+do_validate_clump(const clump_t * cp, gc_state_t * gcst)
{
int ret = 0;
- if_debug_chunk('6', gcst->heap, "[6]validating chunk", cp);
- SCAN_CHUNK_OBJECTS(cp);
+ if_debug_clump('6', gcst->heap, "[6]validating clump", cp);
+ SCAN_CLUMP_OBJECTS(cp);
DO_ALL
if (pre->o_type == &st_free) {
if (!object_size_valid(pre, size, cp)) {
- lprintf3("Bad free object 0x%lx(%lu), in chunk 0x%lx!\n",
+ lprintf3("Bad free object 0x%lx(%lu), in clump 0x%lx!\n",
(ulong) (pre + 1), (ulong) size, (ulong) cp);
return 1;
}
} else if (do_validate_object(pre + 1, cp, gcst)) {
- dmprintf_chunk(gcst->heap, "while validating chunk", cp);
+ dmprintf_clump(gcst->heap, "while validating clump", cp);
return 1;
}
if_debug3m('7', gcst->heap, " [7]validating %s(%lu) 0x%lx\n",
@@ -341,7 +341,7 @@ do_validate_chunk(const chunk_t * cp, gc_state_t * gcst)
mlprintf3(gcst->heap, "while validating %s(%lu) 0x%lx\n",
struct_type_name_string(pre->o_type),
(ulong) size, (ulong) pre);
- dmprintf_chunk(gcst->heap, "in chunk", cp);
+ dmprintf_clump(gcst->heap, "in clump", cp);
return ret;
}
rp = packed_next(rp);
@@ -372,7 +372,7 @@ do_validate_chunk(const chunk_t * cp, gc_state_t * gcst)
ret = ialloc_validate_ref_packed(eptr.ptr, gcst);
# endif
if (ret) {
- dmprintf_chunk(gcst->heap, "while validating chunk", cp);
+ dmprintf_clump(gcst->heap, "while validating clump", cp);
return ret;
}
}
@@ -382,9 +382,9 @@ do_validate_chunk(const chunk_t * cp, gc_state_t * gcst)
}
void
-ialloc_validate_chunk(const chunk_t * cp, gc_state_t * gcst)
+ialloc_validate_clump(const clump_t * cp, gc_state_t * gcst)
{
- if (do_validate_chunk(cp, gcst))
+ if (do_validate_clump(cp, gcst))
gs_abort(gcst->heap);
}
@@ -476,7 +476,7 @@ cks: if (optr != 0) {
if (r_space(&sref) != avm_foreign &&
!gc_locate(sref.value.const_bytes, gcst)
) {
- lprintf4("At 0x%lx, bad name %u, pname = 0x%lx, string 0x%lx not in any chunk\n",
+ lprintf4("At 0x%lx, bad name %u, pname = 0x%lx, string 0x%lx not in any clump\n",
(ulong) pref, (uint) r_size(pref),
(ulong) pref->value.pname,
(ulong) sref.value.const_bytes);
@@ -486,7 +486,7 @@ cks: if (optr != 0) {
break;
case t_string:
if (r_size(pref) != 0 && !gc_locate(pref->value.bytes, gcst)) {
- lprintf3("At 0x%lx, string ptr 0x%lx[%u] not in any chunk\n",
+ lprintf3("At 0x%lx, string ptr 0x%lx[%u] not in any clump\n",
(ulong) pref, (ulong) pref->value.bytes,
(uint) r_size(pref));
ret = 1;
@@ -499,7 +499,7 @@ cks: if (optr != 0) {
size = r_size(pref);
tname = "array";
cka: if (!gc_locate(rptr, gcst)) {
- lprintf3("At 0x%lx, %s 0x%lx not in any chunk\n",
+ lprintf3("At 0x%lx, %s 0x%lx not in any clump\n",
(ulong) pref, tname, (ulong) rptr);
ret = 1;
break;
@@ -523,7 +523,7 @@ cka: if (!gc_locate(rptr, gcst)) {
break;
optr = pref->value.packed;
if (!gc_locate(optr, gcst)) {
- lprintf2("At 0x%lx, packed array 0x%lx not in any chunk\n",
+ lprintf2("At 0x%lx, packed array 0x%lx not in any clump\n",
(ulong) pref, (ulong) optr);
ret = 1;
}
@@ -575,7 +575,7 @@ ialloc_validate_pointer_stability(const obj_header_t * ptr_fr,
/* Validate an object. */
static int
-do_validate_object(const obj_header_t * ptr, const chunk_t * cp,
+do_validate_object(const obj_header_t * ptr, const clump_t * cp,
gc_state_t * gcst)
{
const obj_header_t *pre = ptr - 1;
@@ -590,13 +590,13 @@ do_validate_object(const obj_header_t * ptr, const chunk_t * cp,
st = *gcst; /* no side effects! */
if (!(cp = gc_locate(pre, &st))) {
- mlprintf1(gcst->heap, "Object 0x%lx not in any chunk!\n",
+ mlprintf1(gcst->heap, "Object 0x%lx not in any clump!\n",
(ulong) ptr);
return 1; /*gs_abort(); */
}
}
if (otype == &st_free) {
- mlprintf3(gcst->heap, "Reference to free object 0x%lx(%lu), in chunk 0x%lx!\n",
+ mlprintf3(gcst->heap, "Reference to free object 0x%lx(%lu), in clump 0x%lx!\n",
(ulong) ptr, (ulong) size, (ulong) cp);
return 1;
}
@@ -608,7 +608,7 @@ do_validate_object(const obj_header_t * ptr, const chunk_t * cp,
) {
mlprintf2(gcst->heap, "Bad object 0x%lx(%lu),\n",
(ulong) ptr, (ulong) size);
- dmprintf2(gcst->heap, " ssize = %u, in chunk 0x%lx!\n",
+ dmprintf2(gcst->heap, " ssize = %u, in clump 0x%lx!\n",
otype->ssize, (ulong) cp);
return 1;
}
@@ -616,7 +616,7 @@ do_validate_object(const obj_header_t * ptr, const chunk_t * cp,
}
void
-ialloc_validate_object(const obj_header_t * ptr, const chunk_t * cp,
+ialloc_validate_object(const obj_header_t * ptr, const clump_t * cp,
gc_state_t * gcst)
{
if (do_validate_object(ptr, cp, gcst))
@@ -635,12 +635,12 @@ ialloc_validate_memory(const gs_ref_memory_t * mem, gc_state_t * gcst)
}
void
-ialloc_validate_chunk(const chunk_t * cp, gc_state_t * gcst)
+ialloc_validate_clump(const clump_t * cp, gc_state_t * gcst)
{
}
void
-ialloc_validate_object(const obj_header_t * ptr, const chunk_t * cp,
+ialloc_validate_object(const obj_header_t * ptr, const clump_t * cp,
gc_state_t * gcst)
{
}
diff --git a/psi/ireclaim.c b/psi/ireclaim.c
index 362d99604..d80523842 100644
--- a/psi/ireclaim.c
+++ b/psi/ireclaim.c
@@ -128,7 +128,7 @@ gs_vmreclaim(gs_dual_memory_t *dmem, bool global)
/****** ABORT IF code < 0 ******/
for (i = nmem; --i >= 0; )
- alloc_close_chunk(memories[i]);
+ alloc_close_clump(memories[i]);
/* Prune the file list so it won't retain potentially collectible */
/* files. */
@@ -172,16 +172,16 @@ gs_vmreclaim(gs_dual_memory_t *dmem, bool global)
dicts_gc_cleanup();
- /* Reopen the active chunks. */
+ /* Reopen the active clumps. */
for (i = 0; i < nmem; ++i)
- alloc_open_chunk(memories[i]);
+ alloc_open_clump(memories[i]);
/* Reload the context state. Note this should be done
- AFTER the chunks are reopened, since the context state
+ AFTER the clumps are reopened, since the context state
load could do allocations that must remain.
- If it were done while the chunks were still closed,
- we would lose those allocations when the chunks were opened */
+ If it were done while the clumps were still closed,
+ we would lose those allocations when the clumps were opened */
code = context_state_load(i_ctx_p);
return code;
diff --git a/psi/isave.c b/psi/isave.c
index 0a39e5af4..f37423e80 100644
--- a/psi/isave.c
+++ b/psi/isave.c
@@ -39,9 +39,9 @@ private_st_alloc_save();
/* see below for details. */
static const long max_repeated_scan = 100000;
-/* Define the minimum space for creating an inner chunk. */
-/* Must be at least sizeof(chunk_head_t). */
-static const long min_inner_chunk_space = sizeof(chunk_head_t) + 500;
+/* Define the minimum space for creating an inner clump. */
+/* Must be at least sizeof(clump_head_t). */
+static const long min_inner_clump_space = sizeof(clump_head_t) + 500;
/*
* The logic for saving and restoring the state is complex.
@@ -51,23 +51,23 @@ static const long min_inner_chunk_space = sizeof(chunk_head_t) + 500;
/*
* To save the state of the memory manager:
- * Save the state of the current chunk in which we are allocating.
- * Shrink all chunks to their inner unallocated region.
+ * Save the state of the current clump in which we are allocating.
+ * Shrink all clumps to their inner unallocated region.
* Save and reset the free block chains.
* By doing this, we guarantee that no object older than the save
* can be freed.
*
* To restore the state of the memory manager:
- * Free all chunks newer than the save, and the descriptors for
- * the inner chunks created by the save.
- * Make current the chunk that was current at the time of the save.
- * Restore the state of the current chunk.
+ * Free all clumps newer than the save, and the descriptors for
+ * the inner clumps created by the save.
+ * Make current the clump that was current at the time of the save.
+ * Restore the state of the current clump.
*
* In addition to save ("start transaction") and restore ("abort transaction"),
* we support forgetting a save ("commit transation"). To forget a save:
- * Reassign to the next outer save all chunks newer than the save.
- * Free the descriptors for the inners chunk, updating their outer
- * chunks to reflect additional allocations in the inner chunks.
+ * Reassign to the next outer save all clumps newer than the save.
+ * Free the descriptors for the inners clump, updating their outer
+ * clumps to reflect additional allocations in the inner clumps.
* Concatenate the free block chains with those of the outer save.
*/
@@ -316,7 +316,7 @@ alloc_free_save(gs_ref_memory_t *mem, alloc_save_t *save, const char *scn)
gs_ref_memory_t save_mem;
save_mem = mem->saved->state;
gs_free_object((gs_memory_t *)mem, save, scn);
- /* Free any inner chunk structures. This is the easiest way to do it. */
+ /* Free any inner clump structures. This is the easiest way to do it. */
restore_free(mem);
/* Restore the 'saved' state - this pulls our object off the linked
* list of states. Without this we hit a SEGV in the gc later. */
@@ -403,44 +403,44 @@ alloc_save_space(gs_ref_memory_t * mem, gs_dual_memory_t * dmem, ulong sid)
{
gs_ref_memory_t save_mem;
alloc_save_t *save;
- chunk_t *cp;
- chunk_t *new_pcc = 0;
- chunk_splay_walker sw;
+ clump_t *cp;
+ clump_t *new_pcc = 0;
+ clump_splay_walker sw;
save_mem = *mem;
- alloc_close_chunk(mem);
+ alloc_close_clump(mem);
mem->pcc = 0;
gs_memory_status((gs_memory_t *) mem, &mem->previous_status);
ialloc_reset(mem);
- /* Create inner chunks wherever it's worthwhile. */
+ /* Create inner clumps wherever it's worthwhile. */
- for (cp = chunk_splay_walk_init(&sw, &save_mem); cp != 0; cp = chunk_splay_walk_fwd(&sw)) {
- if (cp->ctop - cp->cbot > min_inner_chunk_space) {
- /* Create an inner chunk to cover only the unallocated part. */
- chunk_t *inner =
- gs_raw_alloc_struct_immovable(mem->non_gc_memory, &st_chunk,
+ for (cp = clump_splay_walk_init(&sw, &save_mem); cp != 0; cp = clump_splay_walk_fwd(&sw)) {
+ if (cp->ctop - cp->cbot > min_inner_clump_space) {
+ /* Create an inner clump to cover only the unallocated part. */
+ clump_t *inner =
+ gs_raw_alloc_struct_immovable(mem->non_gc_memory, &st_clump,
"alloc_save_space(inner)");
if (inner == 0)
break; /* maybe should fail */
- alloc_init_chunk(inner, cp->cbot, cp->ctop, cp->sreloc != 0, cp);
- alloc_link_chunk(inner, mem);
- if_debug2m('u', (gs_memory_t *)mem, "[u]inner chunk: cbot=0x%lx ctop=0x%lx\n",
+ alloc_init_clump(inner, cp->cbot, cp->ctop, cp->sreloc != 0, cp);
+ alloc_link_clump(inner, mem);
+ if_debug2m('u', (gs_memory_t *)mem, "[u]inner clump: cbot=0x%lx ctop=0x%lx\n",
(ulong) inner->cbot, (ulong) inner->ctop);
if (cp == save_mem.pcc)
new_pcc = inner;
}
}
mem->pcc = new_pcc;
- alloc_open_chunk(mem);
+ alloc_open_clump(mem);
save = gs_alloc_struct((gs_memory_t *) mem, alloc_save_t,
&st_alloc_save, "alloc_save_space(save)");
if_debug2m('u', (gs_memory_t *)mem, "[u]save space %u at 0x%lx\n",
mem->space, (ulong) save);
if (save == 0) {
- /* Free the inner chunk structures. This is the easiest way. */
+ /* Free the inner clump structures. This is the easiest way. */
restore_free(mem);
*mem = save_mem;
return 0;
@@ -611,12 +611,12 @@ alloc_save_current(const gs_dual_memory_t * dmem)
bool
alloc_is_since_save(const void *vptr, const alloc_save_t * save)
{
- /* A reference postdates a save iff it is in a chunk allocated */
- /* since the save (including any carried-over inner chunks). */
+ /* A reference postdates a save iff it is in a clump allocated */
+ /* since the save (including any carried-over inner clumps). */
const char *const ptr = (const char *)vptr;
register const gs_ref_memory_t *mem = save->space_local;
- chunk_splay_walker sw;
+ clump_splay_walker sw;
if_debug2m('U', (gs_memory_t *)mem, "[U]is_since_save 0x%lx, 0x%lx:\n",
(ulong) ptr, (ulong) save);
@@ -624,15 +624,15 @@ alloc_is_since_save(const void *vptr, const alloc_save_t * save)
/* alloc_restore_all. */
return true;
}
- /* Check against chunks allocated since the save. */
+ /* Check against clumps allocated since the save. */
/* (There may have been intermediate saves as well.) */
for (;; mem = &mem->saved->state) {
- const chunk_t *cp;
+ const clump_t *cp;
if_debug1m('U', (gs_memory_t *)mem, "[U]checking mem=0x%lx\n", (ulong) mem);
- for (cp = chunk_splay_walk_init(&sw, mem); cp != 0; cp = chunk_splay_walk_fwd(&sw)) {
- if (ptr_is_within_chunk(ptr, cp)) {
- if_debug3m('U', (gs_memory_t *)mem, "[U+]in new chunk 0x%lx: 0x%lx, 0x%lx\n",
+ for (cp = clump_splay_walk_init(&sw, mem); cp != 0; cp = clump_splay_walk_fwd(&sw)) {
+ if (ptr_is_within_clump(ptr, cp)) {
+ if_debug3m('U', (gs_memory_t *)mem, "[U+]in new clump 0x%lx: 0x%lx, 0x%lx\n",
(ulong) cp,
(ulong) cp->cbase, (ulong) cp->cend);
return true;
@@ -656,12 +656,12 @@ alloc_is_since_save(const void *vptr, const alloc_save_t * save)
(mem = save->space_global) != save->space_local &&
save->space_global->num_contexts == 1
) {
- const chunk_t *cp;
+ const clump_t *cp;
if_debug1m('U', (gs_memory_t *)mem, "[U]checking global mem=0x%lx\n", (ulong) mem);
- for (cp = chunk_splay_walk_init(&sw, mem); cp != 0; cp = chunk_splay_walk_fwd(&sw))
- if (ptr_is_within_chunk(ptr, cp)) {
- if_debug3m('U', (gs_memory_t *)mem, "[U+] new chunk 0x%lx: 0x%lx, 0x%lx\n",
+ for (cp = clump_splay_walk_init(&sw, mem); cp != 0; cp = clump_splay_walk_fwd(&sw))
+ if (ptr_is_within_clump(ptr, cp)) {
+ if_debug3m('U', (gs_memory_t *)mem, "[U+] new clump 0x%lx: 0x%lx, 0x%lx\n",
(ulong) cp, (ulong) cp->cbase, (ulong) cp->cend);
return true;
}
@@ -844,7 +844,7 @@ restore_space(gs_ref_memory_t * mem, gs_dual_memory_t *dmem)
}
/* Free memory allocated since the save. */
- /* Note that this frees all chunks except the inner ones */
+ /* Note that this frees all clumps except the inner ones */
/* belonging to this level. */
saved = *save;
restore_free(mem);
@@ -856,7 +856,7 @@ restore_space(gs_ref_memory_t * mem, gs_dual_memory_t *dmem)
*mem = saved.state;
mem->num_contexts = num_contexts;
}
- alloc_open_chunk(mem);
+ alloc_open_clump(mem);
/* Make the allocator current if it was current before the save. */
if (saved.is_current) {
@@ -933,13 +933,13 @@ alloc_restore_all(gs_dual_memory_t * dmem)
static void
restore_finalize(gs_ref_memory_t * mem)
{
- chunk_t *cp;
- chunk_splay_walker sw;
+ clump_t *cp;
+ clump_splay_walker sw;
- alloc_close_chunk(mem);
+ alloc_close_clump(mem);
gs_enable_free((gs_memory_t *) mem, false);
- for (cp = chunk_splay_walk_init(&sw, mem); cp != 0; cp = chunk_splay_walk_bwd(&sw)) {
- SCAN_CHUNK_OBJECTS(cp)
+ for (cp = clump_splay_walk_init(&sw, mem); cp != 0; cp = clump_splay_walk_bwd(&sw)) {
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
struct_proc_finalize((*finalize)) =
pre->o_type->finalize;
@@ -983,7 +983,7 @@ restore_resources(alloc_save_t * sprev, gs_ref_memory_t * mem)
static void
restore_free(gs_ref_memory_t * mem)
{
- /* Free chunks allocated since the save. */
+ /* Free clumps allocated since the save. */
gs_free_all((gs_memory_t *) mem);
}
@@ -1047,31 +1047,31 @@ alloc_forget_save_in(gs_dual_memory_t *dmem, alloc_save_t * save)
while (sprev != save);
return 0;
}
-/* Combine the chunks of the next outer level with those of the current one, */
+/* Combine the clumps of the next outer level with those of the current one, */
/* and free the bookkeeping structures. */
static void
combine_space(gs_ref_memory_t * mem)
{
alloc_save_t *saved = mem->saved;
gs_ref_memory_t *omem = &saved->state;
- chunk_t *cp;
- chunk_splay_walker sw;
+ clump_t *cp;
+ clump_splay_walker sw;
- alloc_close_chunk(mem);
- for (cp = chunk_splay_walk_init(&sw, mem); cp != 0; cp = chunk_splay_walk_fwd(&sw)) {
+ alloc_close_clump(mem);
+ for (cp = clump_splay_walk_init(&sw, mem); cp != 0; cp = clump_splay_walk_fwd(&sw)) {
if (cp->outer == 0)
- alloc_link_chunk(cp, omem);
+ alloc_link_clump(cp, omem);
else {
- chunk_t *outer = cp->outer;
+ clump_t *outer = cp->outer;
outer->inner_count--;
if (mem->pcc == cp)
mem->pcc = outer;
if (mem->cfreed.cp == cp)
mem->cfreed.cp = outer;
- /* "Free" the header of the inner chunk, */
+ /* "Free" the header of the inner clump, */
/* and any immediately preceding gap left by */
- /* the GC having compacted the outer chunk. */
+ /* the GC having compacted the outer clump. */
{
obj_header_t *hp = (obj_header_t *) outer->cbot;
@@ -1086,7 +1086,7 @@ combine_space(gs_ref_memory_t * mem)
hp + 1, "combine_space(header)");
#endif /* **************** */
}
- /* Update the outer chunk's allocation pointers. */
+ /* Update the outer clump's allocation pointers. */
outer->cbot = cp->cbot;
outer->rcur = cp->rcur;
outer->rtop = cp->rtop;
@@ -1125,7 +1125,7 @@ combine_space(gs_ref_memory_t * mem)
mem->largest_free_size = omem->largest_free_size;
}
gs_free_object((gs_memory_t *) mem, saved, "combine_space(saved)");
- alloc_open_chunk(mem);
+ alloc_open_clump(mem);
}
/* Free the changes chain for a level 0 .forgetsave, */
/* resetting the l_new flag in the changed refs. */
@@ -1268,11 +1268,11 @@ save_set_new(gs_ref_memory_t * mem, bool to_new, bool set_limit, ulong *pscanned
return code;
/* Handle newly allocated ref objects. */
- SCAN_MEM_CHUNKS(mem, cp) {
+ SCAN_MEM_CLUMPS(mem, cp) {
if (cp->has_refs) {
bool has_refs = false;
- SCAN_CHUNK_OBJECTS(cp)
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
if_debug3m('U', (gs_memory_t *)mem, "[U]set_new scan(0x%lx(%u), %d)\n",
(ulong) pre, size, to_new);
@@ -1291,7 +1291,7 @@ save_set_new(gs_ref_memory_t * mem, bool to_new, bool set_limit, ulong *pscanned
cp->has_refs = has_refs;
}
}
- END_CHUNKS_SCAN
+ END_CLUMPS_SCAN
if_debug2m('u', (gs_memory_t *)mem, "[u]set_new (%s) scanned %ld\n",
(to_new ? "restore" : "save"), scanned);
*pscanned = scanned;
@@ -1305,14 +1305,14 @@ drop_redundant_changes(gs_ref_memory_t * mem)
register alloc_change_t *chp = mem->changes, *chp_back = NULL, *chp_forth;
/* As we are trying to throw away redundant changes in an allocator instance
- that has already been "saved", the active chunk has already been "closed"
+ that has already been "saved", the active clump has already been "closed"
by alloc_save_space(). Using such an allocator (for example, by calling
gs_free_object() with it) can leave it in an unstable state, causing
- problems for the garbage collector (specifically, the chunk validator code).
- So, before we might use it, open the current chunk, and then close it again
+ problems for the garbage collector (specifically, the clump validator code).
+ So, before we might use it, open the current clump, and then close it again
when we're done.
*/
- alloc_open_chunk(mem);
+ alloc_open_clump(mem);
/* First reverse the list and set all. */
for (; chp; chp = chp_forth) {
@@ -1356,7 +1356,7 @@ drop_redundant_changes(gs_ref_memory_t * mem)
}
mem->changes = chp_back;
- alloc_close_chunk(mem);
+ alloc_close_clump(mem);
}
/* Set or reset the l_new attribute on the changes chain. */
diff --git a/psi/zcontext.c b/psi/zcontext.c
index bda4b478d..aaf349e0c 100644
--- a/psi/zcontext.c
+++ b/psi/zcontext.c
@@ -248,7 +248,7 @@ context_reclaim(vm_spaces * pspaces, bool global)
gs_context_t *pctx = 0; /* = 0 is bogus to pacify compilers */
gs_scheduler_t *psched = 0;
gs_ref_memory_t *lmem = 0; /* = 0 is bogus to pacify compilers */
- chunk_locator_t loc;
+ clump_locator_t loc;
for (i = countof(pspaces->memories.indexed) - 1; psched == 0 && i > 0; --i) {
gs_ref_memory_t *mem = pspaces->memories.indexed[i];
@@ -273,7 +273,7 @@ context_reclaim(vm_spaces * pspaces, bool global)
loc.cp = 0;
for (i = 0; i < CTX_TABLE_SIZE; ++i)
for (pctx = psched->table[i]; pctx; pctx = pctx->table_next)
- pctx->visible = chunk_locate_ptr(pctx, &loc);
+ pctx->visible = clump_locate_ptr(pctx, &loc);
#ifdef DEBUG
if (!psched->current->visible) {
@@ -617,8 +617,8 @@ do_fork(i_ctx_t *i_ctx_p, os_ptr op, const ref * pstdin, const ref * pstdout,
return_error(gs_error_Fatal);
old_userdict = *puserdict;
userdict_size = dict_maxlength(&old_userdict);
- lmem = ialloc_alloc_state(parent, iimemory_local->chunk_size);
- lmem_stable = ialloc_alloc_state(parent, iimemory_local->chunk_size);
+ lmem = ialloc_alloc_state(parent, iimemory_local->clump_size);
+ lmem_stable = ialloc_alloc_state(parent, iimemory_local->clump_size);
if (lmem == 0 || lmem_stable == 0) {
gs_free_object(parent, lmem_stable, "do_fork");
gs_free_object(parent, lmem, "do_fork");