-rw-r--r--   base/gdbflags.h     2
-rw-r--r--   base/gsalloc.c    476
-rw-r--r--   base/gxalloc.h    200
-rw-r--r--   base/gxobj.h        6
-rw-r--r--   doc/Develop.htm     8
-rw-r--r--   doc/Use.htm         2
-rw-r--r--   psi/ialloc.c       12
-rw-r--r--   psi/igc.c         180
-rw-r--r--   psi/igc.h           8
-rw-r--r--   psi/igcstr.c       42
-rw-r--r--   psi/igcstr.h       10
-rw-r--r--   psi/ilocate.c      84
-rw-r--r--   psi/ireclaim.c     12
-rw-r--r--   psi/isave.c       134
-rw-r--r--   psi/zcontext.c      8
15 files changed, 592 insertions, 592 deletions
diff --git a/base/gdbflags.h b/base/gdbflags.h
index dcc7238d5..5ee6f2331 100644
--- a/base/gdbflags.h
+++ b/base/gdbflags.h
@@ -75,7 +75,7 @@ FLAG(curve, '2', 0, "Curve subdivider/rasterizer"),
FLAG(curve_detail, '3', 0, "Curve subdivider/rasterizer (detail)"),
FLAG(gc_strings, '4', 0, "Garbage collector (strings)"),
FLAG(gc_strings_detail, '5', 0, "Garbage collector (strings, detail)"),
-FLAG(gc_chunks, '6', 0, "Garbage collector (chunks, roots)"),
+FLAG(gc_clumps, '6', 0, "Garbage collector (clumps, roots)"),
FLAG(gc_objects, '7', 0, "Garbage collector (objects)"),
FLAG(gc_refs, '8', 0, "Garbage collector (refs)"),
FLAG(gc_pointers, '9', 0, "Garbage collector (pointers)"),
diff --git a/base/gsalloc.c b/base/gsalloc.c
index 774c2b21c..7a828393f 100644
--- a/base/gsalloc.c
+++ b/base/gsalloc.c
@@ -40,12 +40,12 @@ gs_id get_mem_hdr_id (void *ptr)
/*
- * Define whether to try consolidating space before adding a new chunk.
+ * Define whether to try consolidating space before adding a new clump.
* The default is not to do this, because it is computationally
* expensive and doesn't seem to help much. However, this is done for
* "controlled" spaces whether or not the #define is in effect.
*/
-/*#define CONSOLIDATE_BEFORE_ADDING_CHUNK */
+/*#define CONSOLIDATE_BEFORE_ADDING_CLUMP */
/*
* This allocator produces tracing messages of the form
@@ -56,7 +56,7 @@ gs_id get_mem_hdr_id (void *ptr)
* O is {alloc = +, free = -, grow = >, shrink = <},
* T is {bytes = b, object = <, ref = $, string = >}, and
* S is {small freelist = f, large freelist = F, LIFO = space,
- * own chunk = L, lost = #, lost own chunk = ~, other = .}.
+ * own clump = L, lost = #, lost own clump = ~, other = .}.
*/
#ifdef DEBUG
static int
@@ -129,11 +129,11 @@ static obj_header_t *large_freelist_alloc(gs_ref_memory_t *mem, uint size);
static obj_header_t *scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size);
static ulong compute_free_objects(gs_ref_memory_t *);
static obj_header_t *alloc_obj(gs_ref_memory_t *, ulong, gs_memory_type_ptr_t, alloc_flags_t, client_name_t);
-static void consolidate_chunk_free(chunk_t *cp, gs_ref_memory_t *mem);
-static void trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp);
-static chunk_t *alloc_acquire_chunk(gs_ref_memory_t *, ulong, bool, client_name_t);
-static chunk_t *alloc_add_chunk(gs_ref_memory_t *, ulong, client_name_t);
-void alloc_close_chunk(gs_ref_memory_t *);
+static void consolidate_clump_free(clump_t *cp, gs_ref_memory_t *mem);
+static void trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, clump_t *cp);
+static clump_t *alloc_acquire_clump(gs_ref_memory_t *, ulong, bool, client_name_t);
+static clump_t *alloc_add_clump(gs_ref_memory_t *, ulong, client_name_t);
+void alloc_close_clump(gs_ref_memory_t *);
/*
* Define the standard implementation (with garbage collection)
@@ -198,7 +198,7 @@ const gs_memory_procs_t gs_ref_memory_procs =
/*
* Previous versions of this code used a simple linked list of
- * chunks. We change here to use a splay tree of chunks.
+ * clumps. We change here to use a splay tree of clumps.
* Splay Trees can be found documented in "Algorithms and Data
* Structures" by Jeffrey H Kingston.
*
@@ -212,11 +212,11 @@ const gs_memory_procs_t gs_ref_memory_procs =
* recently accessed nodes stay near the root.
*/
-/* #define DEBUG_CHUNKS */
-#ifdef DEBUG_CHUNKS
+/* #define DEBUG_CLUMPS */
+#ifdef DEBUG_CLUMPS
#define SANITY_CHECK(cp) sanity_check(cp)
-void sanity_check_rec(chunk_t *cp, chunk_t *p)
+void sanity_check_rec(clump_t *cp, clump_t *p)
{
if (cp->parent != p)
dprintf("Broken splay tree!\n");
@@ -234,7 +234,7 @@ void sanity_check_rec(chunk_t *cp, chunk_t *p)
}
}
-void sanity_check(chunk_t *cp)
+void sanity_check(clump_t *cp)
{
sanity_check_rec(cp, NULL);
}
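
As a usage sketch of the walker API being renamed here (this mirrors what the SCAN_MEM_CLUMPS macro in gxalloc.h expands to; 'mem' is assumed to be a gs_ref_memory_t *):

    /* Minimal sketch: visit every clump, in order, via the splay walker. */
    clump_splay_walker sw;
    clump_t *cp;

    for (cp = clump_splay_walk_init(&sw, mem); cp != NULL;
         cp = clump_splay_walk_fwd(&sw)) {
        /* ... process cp ... */
    }
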
@@ -254,10 +254,10 @@ enum
/* When initing with the root, we want to pass the smallest inorder one
* back immediately, and set it up so that we step right for the next
* one. */
-chunk_t *
-chunk_splay_walk_init(chunk_splay_walker *sw, const gs_ref_memory_t *mem)
+clump_t *
+clump_splay_walk_init(clump_splay_walker *sw, const gs_ref_memory_t *mem)
{
- chunk_t *cp = mem->root;
+ clump_t *cp = mem->root;
if (cp)
{
@@ -277,18 +277,18 @@ chunk_splay_walk_init(chunk_splay_walker *sw, const gs_ref_memory_t *mem)
* return the node we are given as the first one, and continue
* onwards in an in order fashion.
*/
-chunk_t *
-chunk_splay_walk_init_mid(chunk_splay_walker *sw, chunk_t *cp)
+clump_t *
+clump_splay_walk_init_mid(clump_splay_walker *sw, clump_t *cp)
{
sw->from = SPLAY_FROM_LEFT;
sw->cp = cp;
return cp;
}
-chunk_t *
-chunk_splay_walk_fwd(chunk_splay_walker *sw)
+clump_t *
+clump_splay_walk_fwd(clump_splay_walker *sw)
{
- chunk_t *cp = sw->cp;
+ clump_t *cp = sw->cp;
int from = sw->from;
if (cp == NULL)
@@ -325,7 +325,7 @@ chunk_splay_walk_fwd(chunk_splay_walker *sw)
if (from == SPLAY_FROM_RIGHT)
{
/* We have arrived from the right. Step up. */
- chunk_t *old = cp;
+ clump_t *old = cp;
cp = cp->parent;
from = ((cp == NULL || cp->left == old) ? SPLAY_FROM_LEFT : SPLAY_FROM_RIGHT);
if (from == SPLAY_FROM_LEFT)
@@ -337,10 +337,10 @@ chunk_splay_walk_fwd(chunk_splay_walker *sw)
return cp;
}
-chunk_t *
-chunk_splay_walk_bwd(chunk_splay_walker *sw)
+clump_t *
+clump_splay_walk_bwd(clump_splay_walker *sw)
{
- chunk_t *cp = sw->cp;
+ clump_t *cp = sw->cp;
int from = sw->from;
if (cp == NULL)
@@ -377,7 +377,7 @@ chunk_splay_walk_bwd(chunk_splay_walker *sw)
if (from == SPLAY_FROM_LEFT)
{
/* We have arrived from the left. Step up. */
- chunk_t *old = cp;
+ clump_t *old = cp;
cp = cp->parent;
from = (cp == NULL || cp->left == old ? SPLAY_FROM_LEFT : SPLAY_FROM_RIGHT);
if (from == SPLAY_FROM_LEFT)
@@ -389,10 +389,10 @@ chunk_splay_walk_bwd(chunk_splay_walker *sw)
return cp;
}
-static chunk_t *
-chunk_splay_remove(chunk_t *cp, gs_ref_memory_t *imem)
+static clump_t *
+clump_splay_remove(clump_t *cp, gs_ref_memory_t *imem)
{
- chunk_t *replacement;
+ clump_t *replacement;
if (cp->left == NULL)
{
@@ -412,7 +412,7 @@ chunk_splay_remove(chunk_t *cp, gs_ref_memory_t *imem)
while (replacement->right)
replacement = replacement->right;
/* Remove replacement - easy as just one child */
- (void)chunk_splay_remove(replacement, imem);
+ (void)clump_splay_remove(replacement, imem);
/* Replace cp with replacement */
if (cp->left)
cp->left->parent = replacement;
@@ -436,17 +436,17 @@ chunk_splay_remove(chunk_t *cp, gs_ref_memory_t *imem)
/* Here we apply a function to all the nodes in a tree in
* depth first order. This means that the given function
- * can safely alter: 1) the chunk, 2) it's children,
+ * can safely alter: 1) the clump, 2) its children,
 * 3) its parent's child pointer that points to it
* without fear of corruption. Specifically this means
* that the function can free (and unlink) the node
* if it wants.
*/
-chunk_t *
-chunk_splay_app(chunk_t *root, gs_ref_memory_t *imem, int (*fn)(chunk_t *, void *), void *arg)
+clump_t *
+clump_splay_app(clump_t *root, gs_ref_memory_t *imem, int (*fn)(clump_t *, void *), void *arg)
{
- chunk_t *step_to;
- chunk_t *cp = root;
+ clump_t *step_to;
+ clump_t *cp = root;
int from = SPLAY_FROM_ABOVE;
int res;
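
For illustration, a minimal clump_splay_app() callback in the style of the consolidate and scavenge callbacks later in this file; the counting logic itself is hypothetical:

    /* Hypothetical callback: count the clumps in the tree.
     * SPLAY_APP_CONTINUE / SPLAY_APP_STOP are the return codes
     * declared for this purpose in gxalloc.h. */
    static int
    count_clumps(clump_t *cp, void *arg)
    {
        int *count = (int *)arg;

        (*count)++;
        return SPLAY_APP_CONTINUE;  /* SPLAY_APP_STOP would end the walk early */
    }

    /* ... */
    int count = 0;
    clump_splay_app(mem->root, mem, count_clumps, &count);
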
@@ -530,9 +530,9 @@ chunk_splay_app(chunk_t *root, gs_ref_memory_t *imem, int (*fn)(chunk_t *, void
* A B B C B C A B
*/
static void
-splay_move_to_root(chunk_t *x, gs_ref_memory_t *mem)
+splay_move_to_root(clump_t *x, gs_ref_memory_t *mem)
{
- chunk_t *y, *z;
+ clump_t *y, *z;
if (x == NULL)
return;
@@ -626,10 +626,10 @@ splay_move_to_root(chunk_t *x, gs_ref_memory_t *mem)
}
static void
-splay_insert(chunk_t *cp, gs_ref_memory_t *mem)
+splay_insert(clump_t *cp, gs_ref_memory_t *mem)
{
- chunk_t *node = NULL;
- chunk_t **root = &mem->root;
+ clump_t *node = NULL;
+ clump_t **root = &mem->root;
while (*root) {
node = *root;
@@ -651,11 +651,11 @@ splay_insert(chunk_t *cp, gs_ref_memory_t *mem)
* or local). Does not initialize global or space.
*/
static void *ialloc_solo(gs_memory_t *, gs_memory_type_ptr_t,
- chunk_t **);
+ clump_t **);
gs_ref_memory_t *
-ialloc_alloc_state(gs_memory_t * parent, uint chunk_size)
+ialloc_alloc_state(gs_memory_t * parent, uint clump_size)
{
- chunk_t *cp;
+ clump_t *cp;
gs_ref_memory_t *iimem = ialloc_solo(parent, &st_ref_memory, &cp);
if (iimem == 0)
@@ -665,14 +665,14 @@ ialloc_alloc_state(gs_memory_t * parent, uint chunk_size)
iimem->gs_lib_ctx = parent->gs_lib_ctx;
iimem->non_gc_memory = parent;
iimem->thread_safe_memory = parent->thread_safe_memory;
- iimem->chunk_size = chunk_size;
+ iimem->clump_size = clump_size;
#ifdef MEMENTO
iimem->large_size = 1;
#else
- iimem->large_size = ((chunk_size / 4) & -obj_align_mod) + 1;
+ iimem->large_size = ((clump_size / 4) & -obj_align_mod) + 1;
#endif
iimem->is_controlled = false;
- iimem->gc_status.vm_threshold = chunk_size * 3L;
+ iimem->gc_status.vm_threshold = clump_size * 3L;
iimem->gc_status.max_vm = max_long;
iimem->gc_status.signal_value = 0;
iimem->gc_status.enabled = false;
@@ -696,31 +696,31 @@ ialloc_alloc_state(gs_memory_t * parent, uint chunk_size)
return iimem;
}
-/* Allocate a 'solo' object with its own chunk. */
+/* Allocate a 'solo' object with its own clump. */
static void *
ialloc_solo(gs_memory_t * parent, gs_memory_type_ptr_t pstype,
- chunk_t ** pcp)
+ clump_t ** pcp)
{ /*
* We can't assume that the parent uses the same object header
* that we do, but the GC requires that allocators have
* such a header. Therefore, we prepend one explicitly.
*/
- chunk_t *cp =
- gs_raw_alloc_struct_immovable(parent, &st_chunk,
- "ialloc_solo(chunk)");
+ clump_t *cp =
+ gs_raw_alloc_struct_immovable(parent, &st_clump,
+ "ialloc_solo(clump)");
uint csize =
- ROUND_UP(sizeof(chunk_head_t) + sizeof(obj_header_t) +
+ ROUND_UP(sizeof(clump_head_t) + sizeof(obj_header_t) +
pstype->ssize,
obj_align_mod);
byte *cdata = gs_alloc_bytes_immovable(parent, csize, "ialloc_solo");
- obj_header_t *obj = (obj_header_t *) (cdata + sizeof(chunk_head_t));
+ obj_header_t *obj = (obj_header_t *) (cdata + sizeof(clump_head_t));
if (cp == 0 || cdata == 0) {
gs_free_object(parent, cp, "ialloc_solo(allocation failure)");
gs_free_object(parent, cdata, "ialloc_solo(allocation failure)");
return 0;
}
- alloc_init_chunk(cp, cdata, cdata + csize, false, (chunk_t *) NULL);
+ alloc_init_clump(cp, cdata, cdata + csize, false, (clump_t *) NULL);
cp->cbot = cp->ctop;
cp->parent = cp->left = cp->right = 0;
cp->c_alone = true;
@@ -736,7 +736,7 @@ ialloc_solo(gs_memory_t * parent, gs_memory_type_ptr_t pstype,
void
ialloc_free_state(gs_ref_memory_t *iimem)
{
- chunk_t *cp;
+ clump_t *cp;
gs_memory_t *mem;
if (iimem == NULL)
return;
@@ -749,28 +749,28 @@ ialloc_free_state(gs_ref_memory_t *iimem)
}
/*
- * Add a chunk to an externally controlled allocator. Such allocators
+ * Add a clump to an externally controlled allocator. Such allocators
* allocate all objects as immovable, are not garbage-collected, and
* don't attempt to acquire additional memory on their own.
*/
int
-ialloc_add_chunk(gs_ref_memory_t *imem, ulong space, client_name_t cname)
+ialloc_add_clump(gs_ref_memory_t *imem, ulong space, client_name_t cname)
{
- chunk_t *cp;
+ clump_t *cp;
- /* Allow acquisition of this chunk. */
+ /* Allow acquisition of this clump. */
imem->is_controlled = false;
- imem->large_size = imem->chunk_size;
+ imem->large_size = imem->clump_size;
imem->limit = max_long;
imem->gc_status.max_vm = max_long;
- /* Acquire the chunk. */
- cp = alloc_add_chunk(imem, space, cname);
+ /* Acquire the clump. */
+ cp = alloc_add_clump(imem, space, cname);
/*
* Make all allocations immovable. Since the "movable" allocators
- * allocate within existing chunks, whereas the "immovable" ones
- * allocate in new chunks, we equate the latter to the former, even
+ * allocate within existing clumps, whereas the "immovable" ones
+ * allocate in new clumps, we equate the latter to the former, even
* though this seems backwards.
*/
imem->procs.alloc_bytes_immovable = imem->procs.alloc_bytes;
@@ -779,7 +779,7 @@ ialloc_add_chunk(gs_ref_memory_t *imem, ulong space, client_name_t cname)
imem->procs.alloc_struct_array_immovable = imem->procs.alloc_struct_array;
imem->procs.alloc_string_immovable = imem->procs.alloc_string;
- /* Disable acquisition of additional chunks. */
+ /* Disable acquisition of additional clumps. */
imem->is_controlled = true;
imem->limit = 0;
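
A hedged usage sketch of this entry point (the 64K size, the client name, and the error-handling convention are illustrative assumptions, not taken from this patch):

    /* Hypothetical: hand a controlled allocator one 64K clump.
     * Afterwards the allocator will not acquire memory on its own. */
    int code = ialloc_add_clump(imem, 64 * 1024, "example clump");
    if (code < 0)
        return code;  /* assumed <0-on-error convention */
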
@@ -881,16 +881,16 @@ ialloc_set_limit(register gs_ref_memory_t * mem)
struct free_data
{
gs_ref_memory_t *imem;
- chunk_t *allocator;
+ clump_t *allocator;
};
static int
-free_all_not_allocator(chunk_t *cp, void *arg)
+free_all_not_allocator(clump_t *cp, void *arg)
{
struct free_data *fd = (struct free_data *)arg;
if (cp->cbase + sizeof(obj_header_t) != (byte *)fd->imem)
- alloc_free_chunk(cp, fd->imem);
+ alloc_free_clump(cp, fd->imem);
else
fd->allocator = cp;
@@ -898,7 +898,7 @@ free_all_not_allocator(chunk_t *cp, void *arg)
}
static int
-free_all_allocator(chunk_t *cp, void *arg)
+free_all_allocator(clump_t *cp, void *arg)
{
struct free_data *fd = (struct free_data *)arg;
@@ -906,7 +906,7 @@ free_all_allocator(chunk_t *cp, void *arg)
return SPLAY_APP_CONTINUE;
fd->allocator = cp;
- alloc_free_chunk(cp, fd->imem);
+ alloc_free_clump(cp, fd->imem);
return SPLAY_APP_STOP;
}
@@ -925,17 +925,17 @@ i_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
fd.allocator = NULL;
if (free_mask & FREE_ALL_DATA && imem->root != NULL) {
- /* Free every chunk except the allocator */
- chunk_splay_app(imem->root, imem, free_all_not_allocator, &fd);
+ /* Free every clump except the allocator */
+ clump_splay_app(imem->root, imem, free_all_not_allocator, &fd);
- /* Reinstate the allocator as the sole chunk */
+ /* Reinstate the allocator as the sole clump */
imem->root = fd.allocator;
if (fd.allocator)
fd.allocator->parent = fd.allocator->left = fd.allocator->right = NULL;
}
if (free_mask & FREE_ALL_ALLOCATOR) {
/* Walk the tree to find the allocator. */
- chunk_splay_app(imem->root, imem, free_all_allocator, &fd);
+ clump_splay_app(imem->root, imem, free_all_allocator, &fd);
}
}
@@ -1272,7 +1272,7 @@ i_resize_object(gs_memory_t * mem, void *obj, uint new_num_elements,
new_obj = obj;
} else /* try and trim the object -- but only if room for a dummy header */
if (new_size_rounded + sizeof(obj_header_t) <= old_size_rounded) {
- trim_obj(imem, obj, new_size, (chunk_t *)0);
+ trim_obj(imem, obj, new_size, (clump_t *)0);
new_obj = obj;
}
if (new_obj) {
@@ -1311,7 +1311,7 @@ i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
pstype = pp->o_type;
#ifdef DEBUG
if (gs_debug_c('?')) {
- chunk_locator_t cld;
+ clump_locator_t cld;
if (pstype == &st_free) {
mlprintf2(mem, "%s: object 0x%lx already free!\n",
@@ -1321,7 +1321,7 @@ i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
/* Check that this allocator owns the object being freed. */
cld.memory = imem;
while ((cld.cp = cld.memory->root),
- !chunk_locate_ptr(ptr, &cld)
+ !clump_locate_ptr(ptr, &cld)
) {
if (!cld.memory->saved) {
mlprintf3(mem, "%s: freeing 0x%lx, not owned by memory 0x%lx!\n",
@@ -1339,7 +1339,7 @@ i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
if (!(PTR_BETWEEN((const byte *)pp, cld.cp->cbase,
cld.cp->cbot))
) {
- mlprintf5(mem, "%s: freeing 0x%lx,\n\toutside chunk 0x%lx cbase=0x%lx, cbot=0x%lx!\n",
+ mlprintf5(mem, "%s: freeing 0x%lx,\n\toutside clump 0x%lx cbase=0x%lx, cbot=0x%lx!\n",
client_name_string(cname), (ulong) ptr,
(ulong) cld.cp, (ulong) cld.cp->cbase,
(ulong) cld.cp->cbot);
@@ -1371,37 +1371,37 @@ i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
gs_alloc_fill(ptr, gs_alloc_fill_free, size);
imem->cc.cbot = (byte *) pp;
/* IFF this object is adjacent to (or below) the byte after the
- * highest free object, do the consolidation within this chunk. */
+ * highest free object, do the consolidation within this clump. */
if ((byte *)pp <= imem->cc.int_freed_top) {
- consolidate_chunk_free(&(imem->cc), imem);
+ consolidate_clump_free(&(imem->cc), imem);
}
return;
}
if (pp->o_alone) {
/*
- * We gave this object its own chunk. Free the entire chunk,
+ * We gave this object its own clump. Free the entire clump,
* unless it belongs to an older save level, in which case
* we mustn't overwrite it.
*/
- chunk_locator_t cl;
+ clump_locator_t cl;
#ifdef DEBUG
{
- chunk_locator_t cld;
+ clump_locator_t cld;
cld.memory = imem;
cld.cp = 0;
if (gs_debug_c('a'))
alloc_trace(
- (chunk_locate_ptr(ptr, &cld) ? ":-oL" : ":-o~"),
+ (clump_locate_ptr(ptr, &cld) ? ":-oL" : ":-o~"),
imem, cname, pstype, size, ptr);
}
#endif
cl.memory = imem;
cl.cp = 0;
- if (chunk_locate_ptr(ptr, &cl)) {
+ if (clump_locate_ptr(ptr, &cl)) {
if (!imem->is_controlled)
- alloc_free_chunk(cl.cp, imem);
+ alloc_free_clump(cl.cp, imem);
return;
}
/* Don't overwrite even if gs_alloc_debug is set. */
@@ -1413,7 +1413,7 @@ i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
* overwrite it.
*/
imem->cfreed.memory = imem;
- if (chunk_locate(ptr, &imem->cfreed)) {
+ if (clump_locate(ptr, &imem->cfreed)) {
obj_header_t **pfl;
if (size > max_freelist_size) {
@@ -1425,9 +1425,9 @@ i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
log2_obj_align_mod];
}
/* keep track of highest object on a freelist */
- /* If we're dealing with a block in the currently open chunk
- (in imem->cc) update that, otherwise, update the chunk in
- the chunk list (in imem->cfreed.cp)
+ /* If we're dealing with a block in the currently open clump
+ (in imem->cc) update that, otherwise, update the clump in
+ the clump list (in imem->cfreed.cp)
*/
if (imem->cfreed.cp->chead == imem->cc.chead) {
if ((byte *)pp >= imem->cc.int_freed_top) {
@@ -1461,13 +1461,13 @@ i_alloc_string(gs_memory_t * mem, uint nbytes, client_name_t cname)
{
gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
byte *str;
- chunk_splay_walker sw;
+ clump_splay_walker sw;
/*
- * Cycle through the chunks at the current save level, starting
+ * Cycle through the clumps at the current save level, starting
* with the currently open one.
*/
- chunk_t *cp_orig = chunk_splay_walk_init_mid(&sw, imem->pcc);
+ clump_t *cp_orig = clump_splay_walk_init_mid(&sw, imem->pcc);
if (nbytes + (uint)HDR_ID_OFFSET < nbytes)
return NULL;
@@ -1479,9 +1479,9 @@ i_alloc_string(gs_memory_t * mem, uint nbytes, client_name_t cname)
return NULL;
#endif
if (cp_orig == 0) {
- /* Open an arbitrary chunk. */
- imem->pcc = chunk_splay_walk_init(&sw, imem);
- alloc_open_chunk(imem);
+ /* Open an arbitrary clump. */
+ imem->pcc = clump_splay_walk_init(&sw, imem);
+ alloc_open_clump(imem);
}
top:
if (imem->cc.ctop - imem->cc.cbot > nbytes) {
@@ -1494,33 +1494,33 @@ top:
ASSIGN_HDR_ID(str);
return str;
}
- /* Try the next chunk. */
+ /* Try the next clump. */
{
- chunk_t *cp = chunk_splay_walk_fwd(&sw);
+ clump_t *cp = clump_splay_walk_fwd(&sw);
- alloc_close_chunk(imem);
+ alloc_close_clump(imem);
if (cp == NULL && cp_orig != NULL)
- cp = chunk_splay_walk_init(&sw, imem);
+ cp = clump_splay_walk_init(&sw, imem);
imem->pcc = cp;
- alloc_open_chunk(imem);
+ alloc_open_clump(imem);
if (cp != cp_orig)
goto top;
}
- if (nbytes > string_space_quanta(max_uint - sizeof(chunk_head_t)) *
+ if (nbytes > string_space_quanta(max_uint - sizeof(clump_head_t)) *
string_data_quantum
) { /* Can't represent the size in a uint! */
return 0;
}
- if (nbytes >= imem->large_size) { /* Give it a chunk all its own. */
+ if (nbytes >= imem->large_size) { /* Give it a clump all its own. */
return i_alloc_string_immovable(mem, nbytes, cname);
- } else { /* Add another chunk. */
- chunk_t *cp =
- alloc_acquire_chunk(imem, (ulong) imem->chunk_size, true, "chunk");
+ } else { /* Add another clump. */
+ clump_t *cp =
+ alloc_acquire_clump(imem, (ulong) imem->clump_size, true, "clump");
if (cp == 0)
return 0;
- alloc_close_chunk(imem);
- imem->pcc = chunk_splay_walk_init_mid(&sw, cp);
+ alloc_close_clump(imem);
+ imem->pcc = clump_splay_walk_init_mid(&sw, cp);
imem->cc = *imem->pcc;
gs_alloc_fill(imem->cc.cbase, gs_alloc_fill_free,
imem->cc.climit - imem->cc.cbase);
@@ -1533,7 +1533,7 @@ i_alloc_string_immovable(gs_memory_t * mem, uint nbytes, client_name_t cname)
gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
byte *str;
uint asize;
- chunk_t *cp;
+ clump_t *cp;
nbytes += HDR_ID_OFFSET;
@@ -1541,10 +1541,10 @@ i_alloc_string_immovable(gs_memory_t * mem, uint nbytes, client_name_t cname)
if (Memento_failThisEvent())
return NULL;
#endif
- /* Give it a chunk all its own. */
- asize = string_chunk_space(nbytes) + sizeof(chunk_head_t);
- cp = alloc_acquire_chunk(imem, (ulong) asize, true,
- "large string chunk");
+ /* Give it a clump all its own. */
+ asize = string_clump_space(nbytes) + sizeof(clump_head_t);
+ cp = alloc_acquire_clump(imem, (ulong) asize, true,
+ "large string clump");
if (cp == 0)
return 0;
@@ -1659,14 +1659,14 @@ i_status(gs_memory_t * mem, gs_memory_status_t * pstat)
gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
ulong unused = imem->lost.refs + imem->lost.strings;
ulong inner = 0;
- chunk_splay_walker sw;
- chunk_t *cp;
+ clump_splay_walker sw;
+ clump_t *cp;
- alloc_close_chunk(imem);
- /* Add up unallocated space within each chunk. */
- /* Also keep track of space allocated to inner chunks, */
+ alloc_close_clump(imem);
+ /* Add up unallocated space within each clump. */
+ /* Also keep track of space allocated to inner clumps, */
/* which are included in previous_status.allocated. */
- for (cp = chunk_splay_walk_init(&sw, imem); cp != NULL; cp = chunk_splay_walk_fwd(&sw))
+ for (cp = clump_splay_walk_init(&sw, imem); cp != NULL; cp = clump_splay_walk_fwd(&sw))
{
unused += cp->ctop - cp->cbot;
if (cp->outer)
@@ -1751,7 +1751,7 @@ large_freelist_alloc(gs_ref_memory_t *mem, uint size)
}
if (best_fit == 0) {
/*
- * No single free chunk is large enough, but since we scanned the
+ * No single free clump is large enough, but since we scanned the
* entire list, we now have an accurate updated value for
* largest_free_size.
*/
@@ -1761,7 +1761,7 @@ large_freelist_alloc(gs_ref_memory_t *mem, uint size)
/* Remove from freelist & return excess memory to free */
*best_fit_prev = *(obj_header_t **)best_fit;
- trim_obj(mem, best_fit, aligned_size, (chunk_t *)0);
+ trim_obj(mem, best_fit, aligned_size, (clump_t *)0);
/* Pre-init block header; o_alone & o_type are already init'd */
best_fit[-1].o_size = size;
@@ -1778,15 +1778,15 @@ alloc_obj(gs_ref_memory_t *mem, ulong lsize, gs_memory_type_ptr_t pstype,
if (lsize >= mem->large_size || (flags & ALLOC_IMMOVABLE)) {
/*
- * Give the object a chunk all its own. Note that this case does
+ * Give the object a clump all its own. Note that this case does
* not occur if is_controlled is true.
*/
ulong asize =
((lsize + obj_align_mask) & -obj_align_mod) +
sizeof(obj_header_t);
- chunk_t *cp =
- alloc_acquire_chunk(mem, asize + sizeof(chunk_head_t), false,
- "large object chunk");
+ clump_t *cp =
+ alloc_acquire_clump(mem, asize + sizeof(clump_head_t), false,
+ "large object clump");
if (
#if ARCH_SIZEOF_LONG > ARCH_SIZEOF_INT
@@ -1806,11 +1806,11 @@ alloc_obj(gs_ref_memory_t *mem, ulong lsize, gs_memory_type_ptr_t pstype,
ptr->o_size = lsize;
} else {
/*
- * Cycle through the chunks at the current save level, starting
+ * Cycle through the clumps at the current save level, starting
* with the currently open one.
*/
- chunk_splay_walker sw;
- chunk_t *cp_orig = chunk_splay_walk_init_mid(&sw, mem->pcc);
+ clump_splay_walker sw;
+ clump_t *cp_orig = clump_splay_walk_init_mid(&sw, mem->pcc);
uint asize = obj_size_round((uint) lsize);
bool allocate_success = false;
@@ -1823,9 +1823,9 @@ alloc_obj(gs_ref_memory_t *mem, ulong lsize, gs_memory_type_ptr_t pstype,
}
if (cp_orig == 0) {
- /* Open an arbitrary chunk. */
- mem->pcc = chunk_splay_walk_init(&sw, mem);
- alloc_open_chunk(mem);
+ /* Open an arbitrary clump. */
+ mem->pcc = clump_splay_walk_init(&sw, mem);
+ alloc_open_clump(mem);
}
#define CAN_ALLOC_AT_END(cp)\
@@ -1844,19 +1844,19 @@ alloc_obj(gs_ref_memory_t *mem, ulong lsize, gs_memory_type_ptr_t pstype,
break;
}
}
- /* No luck, go on to the next chunk. */
+ /* No luck, go on to the next clump. */
{
- chunk_t *cp = chunk_splay_walk_fwd(&sw);
+ clump_t *cp = clump_splay_walk_fwd(&sw);
- alloc_close_chunk(mem);
+ alloc_close_clump(mem);
if (cp == NULL && cp_orig != NULL)
- cp = chunk_splay_walk_init(&sw, mem);
+ cp = clump_splay_walk_init(&sw, mem);
mem->pcc = cp;
- alloc_open_chunk(mem);
+ alloc_open_clump(mem);
}
} while (mem->pcc != cp_orig);
-#ifdef CONSOLIDATE_BEFORE_ADDING_CHUNK
+#ifdef CONSOLIDATE_BEFORE_ADDING_CLUMP
if (!allocate_success) {
/*
* Try consolidating free space before giving up.
@@ -1864,17 +1864,17 @@ alloc_obj(gs_ref_memory_t *mem, ulong lsize, gs_memory_type_ptr_t pstype,
* a lot of computation and doesn't seem to improve things much.
*/
if (!mem->is_controlled) { /* already did this if controlled */
- chunk_t *cp;
+ clump_t *cp;
- alloc_close_chunk(mem);
- for (cp = chunk_splay_walk_init_mid(&sw, cp_orig); cp != NULL; cp = chunk_splay_walk_fwd(&sw))
+ alloc_close_clump(mem);
+ for (cp = clump_splay_walk_init_mid(&sw, cp_orig); cp != NULL; cp = clump_splay_walk_fwd(&sw))
{
if (cp == NULL && cp_orig != NULL)
- cp = chunk_splay_walk_init(&sw, mem);
- consolidate_chunk_free(cp, mem);
+ cp = clump_splay_walk_init(&sw, mem);
+ consolidate_clump_free(cp, mem);
if (CAN_ALLOC_AT_END(cp)) {
mem->pcc = cp;
- alloc_open_chunk(mem);
+ alloc_open_clump(mem);
allocate_success = true;
break;
}
@@ -1886,9 +1886,9 @@ alloc_obj(gs_ref_memory_t *mem, ulong lsize, gs_memory_type_ptr_t pstype,
#undef CAN_ALLOC_AT_END
if (!allocate_success) {
- /* Add another chunk. */
- chunk_t *cp =
- alloc_add_chunk(mem, (ulong)mem->chunk_size, "chunk");
+ /* Add another clump. */
+ clump_t *cp =
+ alloc_add_clump(mem, (ulong)mem->clump_size, "clump");
if (cp) {
/* mem->pcc == cp, mem->cc == *mem->pcc. */
@@ -1929,12 +1929,12 @@ done:
* (int_freed_top).
*/
static void
-consolidate_chunk_free(chunk_t *cp, gs_ref_memory_t *mem)
+consolidate_clump_free(clump_t *cp, gs_ref_memory_t *mem)
{
obj_header_t *begin_free = 0;
- cp->int_freed_top = cp->cbase; /* below all objects in chunk */
- SCAN_CHUNK_OBJECTS(cp)
+ cp->int_freed_top = cp->cbase; /* below all objects in clump */
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
if (pre->o_type == &st_free) {
if (begin_free == 0)
@@ -1950,7 +1950,7 @@ consolidate_chunk_free(chunk_t *cp, gs_ref_memory_t *mem)
/* Remove the free objects from the freelists. */
remove_range_from_freelist(mem, begin_free, cp->cbot);
if_debug4m('a', (const gs_memory_t *)mem,
- "[a]resetting chunk 0x%lx cbot from 0x%lx to 0x%lx (%lu free)\n",
+ "[a]resetting clump 0x%lx cbot from 0x%lx to 0x%lx (%lu free)\n",
(ulong) cp, (ulong) cp->cbot, (ulong) begin_free,
(ulong) ((byte *) cp->cbot - (byte *) begin_free));
cp->cbot = (byte *) begin_free;
@@ -1958,15 +1958,15 @@ consolidate_chunk_free(chunk_t *cp, gs_ref_memory_t *mem)
}
static int
-consolidate(chunk_t *cp, void *arg)
+consolidate(clump_t *cp, void *arg)
{
gs_ref_memory_t *mem = (gs_ref_memory_t *)arg;
- consolidate_chunk_free(cp, mem);
+ consolidate_clump_free(cp, mem);
if (cp->cbot == cp->cbase && cp->ctop == cp->climit) {
- /* The entire chunk is free. */
+ /* The entire clump is free. */
if (!mem->is_controlled) {
- alloc_free_chunk(cp, mem);
+ alloc_free_clump(cp, mem);
if (mem->pcc == cp)
mem->pcc = NULL;
}
@@ -1979,20 +1979,20 @@ consolidate(chunk_t *cp, void *arg)
void
ialloc_consolidate_free(gs_ref_memory_t *mem)
{
- alloc_close_chunk(mem);
+ alloc_close_clump(mem);
- /* We used to visit chunks in reverse order to encourage LIFO behavior,
+ /* We used to visit clumps in reverse order to encourage LIFO behavior,
* but with binary trees this is not possible (unless you want to
* either change the tree during the process, recurse, or otherwise
* hold the state). */
- chunk_splay_app(mem->root, mem, consolidate, mem);
+ clump_splay_app(mem->root, mem, consolidate, mem);
- /* NOTE: Previously, if we freed the current chunk, we'd move to whatever the
+ /* NOTE: Previously, if we freed the current clump, we'd move to whatever the
 * bigger of its children was. We now just move to the root. */
if (mem->pcc == NULL)
mem->pcc = mem->root;
- alloc_open_chunk(mem);
+ alloc_open_clump(mem);
}
static void
i_consolidate_free(gs_memory_t *mem)
@@ -2009,7 +2009,7 @@ typedef struct
} scavenge_data;
static int
-scavenge(chunk_t *cp, void *arg)
+scavenge(clump_t *cp, void *arg)
{
scavenge_data *sd = (scavenge_data *)arg;
obj_header_t *begin_free = NULL;
@@ -2017,7 +2017,7 @@ scavenge(chunk_t *cp, void *arg)
sd->found_pre = NULL;
- SCAN_CHUNK_OBJECTS(cp)
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
if (pre->o_type == &st_free) {
if (begin_free == 0) {
@@ -2049,7 +2049,7 @@ scavenge(chunk_t *cp, void *arg)
return SPLAY_APP_CONTINUE;
}
-/* try to free-up given amount of space from freespace below chunk base */
+/* try to free-up given amount of space from freespace below clump base */
static obj_header_t * /* returns uninitialized object hdr, NULL if none found */
scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size)
{
@@ -2062,7 +2062,7 @@ scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size)
sd.mem = mem;
sd.request_size = request_size;
- chunk_splay_app(mem->root, mem, scavenge, &sd);
+ clump_splay_app(mem->root, mem, scavenge, &sd);
return sd.found_pre;
}
@@ -2141,7 +2141,7 @@ remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top)
/* Trim a memory object down to a given size */
static void
-trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp)
+trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, clump_t *cp)
/* Obj must have rounded size == req'd size, or have enough room for */
/* trailing dummy obj_header */
{
@@ -2156,13 +2156,13 @@ trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp)
if (old_rounded_size == rounded_size)
return; /* nothing more to do here */
/*
- * If the object is alone in its chunk, move cbot to point to the end
+ * If the object is alone in its clump, move cbot to point to the end
* of the object.
*/
if (pre_obj->o_alone) {
if (!cp) {
mem->cfreed.memory = mem;
- if (chunk_locate(obj, &mem->cfreed)) {
+ if (clump_locate(obj, &mem->cfreed)) {
cp = mem->cfreed.cp;
}
}
@@ -2258,26 +2258,26 @@ i_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp, client_name_t cname)
gs_free_object(imem->non_gc_memory, rp, "i_unregister_root");
}
-/* ================ Chunks ================ */
+/* ================ Clumps ================ */
-public_st_chunk();
+public_st_clump();
-/* Insert a chunk in the chain. This is exported for the GC and for */
+/* Insert a clump in the chain. This is exported for the GC and for */
/* the forget_save operation. */
void
-alloc_link_chunk(chunk_t * cp, gs_ref_memory_t * imem)
+alloc_link_clump(clump_t * cp, gs_ref_memory_t * imem)
{
splay_insert(cp, imem);
}
-/* Add a chunk for ordinary allocation. */
-static chunk_t *
-alloc_add_chunk(gs_ref_memory_t * mem, ulong csize, client_name_t cname)
+/* Add a clump for ordinary allocation. */
+static clump_t *
+alloc_add_clump(gs_ref_memory_t * mem, ulong csize, client_name_t cname)
{
- chunk_t *cp = alloc_acquire_chunk(mem, csize, true, cname);
+ clump_t *cp = alloc_acquire_clump(mem, csize, true, cname);
if (cp) {
- alloc_close_chunk(mem);
+ alloc_close_clump(mem);
mem->pcc = cp;
mem->cc = *mem->pcc;
gs_alloc_fill(mem->cc.cbase, gs_alloc_fill_free,
@@ -2286,16 +2286,16 @@ alloc_add_chunk(gs_ref_memory_t * mem, ulong csize, client_name_t cname)
return cp;
}
-/* Acquire a chunk. If we would exceed MaxLocalVM (if relevant), */
+/* Acquire a clump. If we would exceed MaxLocalVM (if relevant), */
/* or if we would exceed the VMThreshold and psignal is NULL, */
/* return 0; if we would exceed the VMThreshold but psignal is valid, */
/* just set the signal and return successfully. */
-static chunk_t *
-alloc_acquire_chunk(gs_ref_memory_t * mem, ulong csize, bool has_strings,
+static clump_t *
+alloc_acquire_clump(gs_ref_memory_t * mem, ulong csize, bool has_strings,
client_name_t cname)
{
gs_memory_t *parent = mem->non_gc_memory;
- chunk_t *cp;
+ clump_t *cp;
byte *cdata;
#if ARCH_SIZEOF_LONG > ARCH_SIZEOF_INT
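
The comment above spells out the acquisition policy; a paraphrase as a sketch (the gc_status field names match those seen elsewhere in this patch, but 'psignal' and the 'total' bookkeeping are assumptions, not the patch's code):

    /* Illustrative policy check only. */
    static bool
    acquire_allowed(gs_ref_memory_t *mem, ulong csize, ulong total)
    {
        if (total + csize > mem->gc_status.max_vm)
            return false;                    /* would exceed MaxLocalVM */
        if (total + csize > mem->gc_status.vm_threshold) {
            if (mem->gc_status.psignal == NULL)
                return false;                /* cannot signal: fail */
            *mem->gc_status.psignal = mem->gc_status.signal_value;
        }
        return true;                         /* proceed; signal may be set */
    }
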
@@ -2303,7 +2303,7 @@ alloc_acquire_chunk(gs_ref_memory_t * mem, ulong csize, bool has_strings,
if (csize != (uint) csize)
return 0;
#endif
- cp = gs_raw_alloc_struct_immovable(parent, &st_chunk, cname);
+ cp = gs_raw_alloc_struct_immovable(parent, &st_clump, cname);
/* gc_status.signal_value is initialised to zero when the
* allocator is created, only the Postscript interpreter
@@ -2332,25 +2332,25 @@ alloc_acquire_chunk(gs_ref_memory_t * mem, ulong csize, bool has_strings,
mem->gc_status.requested = csize;
return 0;
}
- alloc_init_chunk(cp, cdata, cdata + csize, has_strings, (chunk_t *) 0);
- alloc_link_chunk(cp, mem);
- mem->allocated += st_chunk.ssize + csize;
+ alloc_init_clump(cp, cdata, cdata + csize, has_strings, (clump_t *) 0);
+ alloc_link_clump(cp, mem);
+ mem->allocated += st_clump.ssize + csize;
return cp;
}
-/* Initialize the pointers in a chunk. This is exported for save/restore. */
+/* Initialize the pointers in a clump. This is exported for save/restore. */
/* The bottom pointer must be aligned, but the top pointer need not */
/* be aligned. */
void
-alloc_init_chunk(chunk_t * cp, byte * bot, byte * top, bool has_strings,
- chunk_t * outer)
+alloc_init_clump(clump_t * cp, byte * bot, byte * top, bool has_strings,
+ clump_t * outer)
{
byte *cdata = bot;
if (outer != 0)
outer->inner_count++;
- cp->chead = (chunk_head_t *) cdata;
- cdata += sizeof(chunk_head_t);
+ cp->chead = (clump_head_t *) cdata;
+ cdata += sizeof(clump_head_t);
cp->cbot = cp->cbase = cp->int_freed_top = cdata;
cp->cend = top;
cp->rcur = 0;
@@ -2363,7 +2363,7 @@ alloc_init_chunk(chunk_t * cp, byte * bot, byte * top, bool has_strings,
if (has_strings && top - cdata >= string_space_quantum + sizeof(long) - 1) {
/*
* We allocate a large enough string marking and reloc table
- * to cover the entire chunk.
+ * to cover the entire clump.
*/
uint nquanta = string_space_quanta(top - cdata);
@@ -2385,41 +2385,41 @@ alloc_init_chunk(chunk_t * cp, byte * bot, byte * top, bool has_strings,
alloc_init_free_strings(cp);
}
-/* Initialize the string freelists in a chunk. */
+/* Initialize the string freelists in a clump. */
void
-alloc_init_free_strings(chunk_t * cp)
+alloc_init_free_strings(clump_t * cp)
{
if (cp->sfree1)
memset(cp->sfree1, 0, STRING_FREELIST_SPACE(cp));
cp->sfree = 0;
}
-/* Close up the current chunk. */
+/* Close up the current clump. */
/* This is exported for save/restore and the GC. */
void
-alloc_close_chunk(gs_ref_memory_t * mem)
+alloc_close_clump(gs_ref_memory_t * mem)
{
if (mem->pcc != 0) {
*mem->pcc = mem->cc;
#ifdef DEBUG
if (gs_debug_c('a')) {
dmlprintf1((const gs_memory_t *)mem, "[a%d]", alloc_trace_space(mem));
- dmprintf_chunk((const gs_memory_t *)mem, "closing chunk", mem->pcc);
+ dmprintf_clump((const gs_memory_t *)mem, "closing clump", mem->pcc);
}
#endif
}
}
-/* Reopen the current chunk after a GC or restore. */
+/* Reopen the current clump after a GC or restore. */
void
-alloc_open_chunk(gs_ref_memory_t * mem)
+alloc_open_clump(gs_ref_memory_t * mem)
{
if (mem->pcc != 0) {
mem->cc = *mem->pcc;
#ifdef DEBUG
if (gs_debug_c('a')) {
dmlprintf1((const gs_memory_t *)mem, "[a%d]", alloc_trace_space(mem));
- dmprintf_chunk((const gs_memory_t *)mem, "opening chunk", mem->pcc);
+ dmprintf_clump((const gs_memory_t *)mem, "opening clump", mem->pcc);
}
#endif
}
@@ -2427,9 +2427,9 @@ alloc_open_chunk(gs_ref_memory_t * mem)
#ifdef DEBUG
static int
-check_in_chunk(chunk_t *cp, void *arg)
+check_in_clump(clump_t *cp, void *arg)
{
- chunk_t **cpp = (chunk_t **)arg;
+ clump_t **cpp = (clump_t **)arg;
if (*cpp != cp)
return SPLAY_APP_CONTINUE;
@@ -2439,23 +2439,23 @@ check_in_chunk(chunk_t *cp, void *arg)
}
#endif
-/* Remove a chunk from the chain. This is exported for the GC. */
+/* Remove a clump from the chain. This is exported for the GC. */
void
-alloc_unlink_chunk(chunk_t * cp, gs_ref_memory_t * mem)
+alloc_unlink_clump(clump_t * cp, gs_ref_memory_t * mem)
{
#ifdef DEBUG
- if (gs_alloc_debug) { /* Check to make sure this chunk belongs to this allocator. */
- chunk_t *found = cp;
- chunk_splay_app(mem->root, mem, check_in_chunk, &found);
+ if (gs_alloc_debug) { /* Check to make sure this clump belongs to this allocator. */
+ clump_t *found = cp;
+ clump_splay_app(mem->root, mem, check_in_clump, &found);
if (found != NULL) {
- mlprintf2((const gs_memory_t *)mem, "unlink_chunk 0x%lx not owned by memory 0x%lx!\n",
+ mlprintf2((const gs_memory_t *)mem, "unlink_clump 0x%lx not owned by memory 0x%lx!\n",
(ulong) cp, (ulong) mem);
return; /*gs_abort(); */
}
}
#endif
- (void)chunk_splay_remove(cp, mem);
+ (void)clump_splay_remove(cp, mem);
if (mem->pcc != NULL) {
mem->cc.left = mem->pcc->left;
mem->cc.right = mem->pcc->right;
@@ -2468,41 +2468,41 @@ alloc_unlink_chunk(chunk_t * cp, gs_ref_memory_t * mem)
}
/*
- * Free a chunk. This is exported for the GC. Since we eventually use
- * this to free the chunk containing the allocator itself, we must be
+ * Free a clump. This is exported for the GC. Since we eventually use
+ * this to free the clump containing the allocator itself, we must be
* careful not to reference anything in the allocator after freeing the
- * chunk data.
+ * clump data.
*/
void
-alloc_free_chunk(chunk_t * cp, gs_ref_memory_t * mem)
+alloc_free_clump(clump_t * cp, gs_ref_memory_t * mem)
{
gs_memory_t *parent = mem->non_gc_memory;
byte *cdata = (byte *)cp->chead;
ulong csize = (byte *)cp->cend - cdata;
- alloc_unlink_chunk(cp, mem);
- mem->allocated -= st_chunk.ssize;
+ alloc_unlink_clump(cp, mem);
+ mem->allocated -= st_clump.ssize;
if (mem->cfreed.cp == cp)
mem->cfreed.cp = 0;
if (cp->outer == 0) {
mem->allocated -= csize;
- gs_free_object(parent, cdata, "alloc_free_chunk(data)");
+ gs_free_object(parent, cdata, "alloc_free_clump(data)");
} else {
cp->outer->inner_count--;
gs_alloc_fill(cdata, gs_alloc_fill_free, csize);
}
- gs_free_object(parent, cp, "alloc_free_chunk(chunk struct)");
+ gs_free_object(parent, cp, "alloc_free_clump(clump struct)");
}
-/* Find the chunk for a pointer. */
+/* Find the clump for a pointer. */
/* Note that this only searches the current save level. */
-/* Since a given save level can't contain both a chunk and an inner chunk */
-/* of that chunk, we can stop when is_within_chunk succeeds, and just test */
-/* is_in_inner_chunk then. */
+/* Since a given save level can't contain both a clump and an inner clump */
+/* of that clump, we can stop when is_within_clump succeeds, and just test */
+/* is_in_inner_clump then. */
bool
-chunk_locate_ptr(const void *ptr, chunk_locator_t * clp)
+clump_locate_ptr(const void *ptr, clump_locator_t * clp)
{
- chunk_t *cp = clp->memory->root;
+ clump_t *cp = clp->memory->root;
while (cp)
{
@@ -2519,7 +2519,7 @@ chunk_locate_ptr(const void *ptr, chunk_locator_t * clp)
/* Found it! */
splay_move_to_root(cp, clp->memory);
clp->cp = cp;
- return !ptr_is_in_inner_chunk(ptr, cp);
+ return !ptr_is_in_inner_clump(ptr, cp);
}
return false;
}
@@ -2708,12 +2708,12 @@ debug_print_object(const gs_memory_t *mem, const void *obj, const dump_control_t
}
}
-/* Print the contents of a chunk with the given options. */
+/* Print the contents of a clump with the given options. */
/* Relevant options: all. */
void
-debug_dump_chunk(const gs_memory_t *mem, const chunk_t * cp, const dump_control_t * control)
+debug_dump_clump(const gs_memory_t *mem, const clump_t * cp, const dump_control_t * control)
{
- dmprintf1(mem, "chunk at 0x%lx:\n", (ulong) cp);
+ dmprintf1(mem, "clump at 0x%lx:\n", (ulong) cp);
dmprintf3(mem, " chead=0x%lx cbase=0x%lx sbase=0x%lx\n",
(ulong) cp->chead, (ulong) cp->cbase, (ulong) cp->sbase);
dmprintf3(mem, " rcur=0x%lx rtop=0x%lx cbot=0x%lx\n",
@@ -2736,7 +2736,7 @@ debug_dump_chunk(const gs_memory_t *mem, const chunk_t * cp, const dump_control_
min(control->top, cp->climit)),
0, true);
}
- SCAN_CHUNK_OBJECTS(cp)
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
if (obj_in_control_region(pre + 1,
(const byte *)(pre + 1) + size,
@@ -2746,28 +2746,28 @@ debug_dump_chunk(const gs_memory_t *mem, const chunk_t * cp, const dump_control_
END_OBJECTS_SCAN_NO_ABORT
}
void
-debug_print_chunk(const gs_memory_t *mem, const chunk_t * cp)
+debug_print_clump(const gs_memory_t *mem, const clump_t * cp)
{
dump_control_t control;
control = dump_control_default;
- debug_dump_chunk(mem, cp, &control);
+ debug_dump_clump(mem, cp, &control);
}
-/* Print the contents of all chunks managed by an allocator. */
+/* Print the contents of all clumps managed by an allocator. */
/* Relevant options: all. */
void
debug_dump_memory(const gs_ref_memory_t * mem, const dump_control_t * control)
{
- const chunk_t *mcp;
- chunk_splay_walker sw;
+ const clump_t *mcp;
+ clump_splay_walker sw;
- for (mcp = chunk_splay_walk_init(&sw, mem); mcp != NULL; mcp = chunk_splay_walk_fwd(&sw))
+ for (mcp = clump_splay_walk_init(&sw, mem); mcp != NULL; mcp = clump_splay_walk_fwd(&sw))
{
- const chunk_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);
+ const clump_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);
if (obj_in_control_region(cp->cbase, cp->cend, control))
- debug_dump_chunk((const gs_memory_t *)mem, cp, control);
+ debug_dump_clump((const gs_memory_t *)mem, cp, control);
}
}
@@ -2781,16 +2781,16 @@ debug_dump_allocator(const gs_ref_memory_t *mem)
void
debug_find_pointers(const gs_ref_memory_t *mem, const void *target)
{
- chunk_splay_walker sw;
+ clump_splay_walker sw;
dump_control_t control;
- const chunk_t *mcp;
+ const clump_t *mcp;
control.options = 0;
- for (mcp = chunk_splay_walk_init(&sw, mem); mcp; mcp = chunk_splay_walk_fwd(&sw))
+ for (mcp = clump_splay_walk_init(&sw, mem); mcp; mcp = clump_splay_walk_fwd(&sw))
{
- const chunk_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);
+ const clump_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);
- SCAN_CHUNK_OBJECTS(cp);
+ SCAN_CLUMP_OBJECTS(cp);
DO_ALL
struct_proc_enum_ptrs((*proc)) = pre->o_type->enum_ptrs;
uint index = 0;
diff --git a/base/gxalloc.h b/base/gxalloc.h
index 4520942f6..6e85ac084 100644
--- a/base/gxalloc.h
+++ b/base/gxalloc.h
@@ -28,10 +28,10 @@ typedef struct gs_ref_memory_s gs_ref_memory_t;
#include "gsalloc.h"
#include "gxobj.h"
-/* ================ Chunks ================ */
+/* ================ Clumps ================ */
/*
- * We obtain memory from the operating system in `chunks'. A chunk
+ * We obtain memory from the operating system in `clumps'. A clump
* may hold only a single large object (or string), or it may hold
* many objects (allocated from the bottom up, always aligned)
* and strings (allocated from the top down, not aligned).
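
The clump_s pointer fields defined below are easiest to read as a picture; this sketch is assembled from their field comments (low addresses first):

    /*  chead  -> clump_head_t (csbase aliases this)
     *  cbase  -> bottom of data; objects grow upward from here
     *  cbot   -> top of objects = bottom of the free gap
     *  ctop   -> top of the free gap = bottom of strings
     *  climit -> top of strings; the string mark/reloc tables sit
     *            between climit and cend when the clump has strings
     *  cend   -> end of the clump
     */
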
@@ -50,8 +50,8 @@ typedef struct gs_ref_memory_s gs_ref_memory_t;
*/
/*
- * When we do a save, we create a new 'inner' chunk out of the remaining
- * space in the currently active chunk. Inner chunks must not be freed
+ * When we do a save, we create a new 'inner' clump out of the remaining
+ * space in the currently active clump. Inner clumps must not be freed
* by a restore.
*
* The garbage collector implements relocation for refs by scanning
@@ -64,14 +64,14 @@ typedef struct gs_ref_memory_s gs_ref_memory_t;
/*
* Strings carry some additional overhead for use by the GC.
- * At the top of the chunk is a table of relocation values for
+ * At the top of the clump is a table of relocation values for
* 16N-character blocks of strings, where N is sizeof(uint).
* This table is aligned, by adding padding above it if necessary.
* Just below it is a mark table for the strings. This table is also aligned,
* to improve GC performance. The actual string data start below
- * the mark table. These tables are not needed for a chunk that holds
+ * the mark table. These tables are not needed for a clump that holds
* a single large (non-string) object, but they are needed for all other
- * chunks, including chunks created to hold a single large string.
+ * clumps, including clumps created to hold a single large string.
*/
/*
@@ -98,10 +98,10 @@ typedef uint string_reloc_offset;
(string_data_quantum + (string_data_quantum / 8) +\
sizeof(string_reloc_offset))
/*
- * Compute the amount of space needed for a chunk that holds only
+ * Compute the amount of space needed for a clump that holds only
* a string of a given size.
*/
-#define string_chunk_space(nbytes)\
+#define string_clump_space(nbytes)\
(((nbytes) + (string_data_quantum - 1)) / string_data_quantum *\
string_space_quantum)
/*
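
For concreteness, a worked example of the two quanta (assuming sizeof(uint) == 4, so every number here is illustrative):

    /*  string_data_quantum  = 16 * sizeof(uint) = 64
     *  string_space_quantum = 64 + 64/8 + sizeof(string_reloc_offset)
     *                       = 64 + 8 + 4 = 76
     *  so a clump holding only a 100-byte string needs
     *  string_clump_space(100) = ((100 + 63) / 64) * 76 = 2 * 76 = 152
     *  bytes of string space, plus the clump head.
     */
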
@@ -115,28 +115,28 @@ typedef uint string_reloc_offset;
#define string_quanta_mark_size(nquanta)\
((nquanta) * (string_data_quantum / 8))
/*
- * Compute the size of the string freelists for a chunk.
+ * Compute the size of the string freelists for a clump.
*/
#define STRING_FREELIST_SPACE(cp)\
(((cp->climit - csbase(cp) + 255) >> 8) * sizeof(*cp->sfree1))
/*
- * To allow the garbage collector to combine chunks, we store in the
- * head of each chunk the address to which its contents will be moved.
+ * To allow the garbage collector to combine clumps, we store in the
+ * head of each clump the address to which its contents will be moved.
*/
-/*typedef struct chunk_head_s chunk_head_t; *//* in gxobj.h */
+/*typedef struct clump_head_s clump_head_t; *//* in gxobj.h */
-/* Structure for a chunk. */
-typedef struct chunk_s chunk_t;
-struct chunk_s {
- chunk_head_t *chead; /* chunk head, bottom of chunk; */
+/* Structure for a clump. */
+typedef struct clump_s clump_t;
+struct clump_s {
+ clump_head_t *chead; /* clump head, bottom of clump; */
/* csbase is an alias for chead */
#define csbase(cp) ((byte *)(cp)->chead)
/* Note that allocation takes place both from the bottom up */
/* (aligned objects) and from the top down (strings). */
- byte *cbase; /* bottom of chunk data area */
+ byte *cbase; /* bottom of clump data area */
byte *int_freed_top; /* top of most recent internal free area */
- /* in chunk (which may no longer be free), */
+ /* in clump (which may no longer be free), */
/* used to decide when to consolidate */
/* trailing free space in allocated area */
byte *cbot; /* bottom of free area */
@@ -146,16 +146,16 @@ struct chunk_s {
byte *ctop; /* top of free area */
/* (bottom of strings) */
byte *climit; /* top of strings */
- byte *cend; /* top of chunk */
- chunk_t *parent; /* splay tree parent chunk */
- chunk_t *left; /* splay tree left chunk */
- chunk_t *right; /* splay tree right chunk */
- chunk_t *outer; /* the chunk of which this is */
- /* an inner chunk, if any */
- uint inner_count; /* number of chunks of which this is */
- /* the outer chunk, if any */
- bool has_refs; /* true if any refs in chunk */
- bool c_alone; /* this chunk is for a single allocation */
+ byte *cend; /* top of clump */
+ clump_t *parent; /* splay tree parent clump */
+ clump_t *left; /* splay tree left clump */
+ clump_t *right; /* splay tree right clump */
+ clump_t *outer; /* the clump of which this is */
+ /* an inner clump, if any */
+ uint inner_count; /* number of clumps of which this is */
+ /* the outer clump, if any */
+ bool has_refs; /* true if any refs in clump */
+ bool c_alone; /* this clump is for a single allocation */
/*
* Free lists for single bytes in blocks of 1 to 2*N-1 bytes, one per
* 256 bytes in [csbase..climit), where N is sizeof(uint). The chain
@@ -187,15 +187,15 @@ struct chunk_s {
byte *rescan_top; /* top of range ditto */
};
-/* The chunk descriptor is exported only for isave.c. */
-extern_st(st_chunk);
-#define public_st_chunk() /* in ialloc.c */\
- gs_public_st_ptrs3(st_chunk, chunk_t, "chunk_t",\
- chunk_enum_ptrs, chunk_reloc_ptrs, left, right, parent)
+/* The clump descriptor is exported only for isave.c. */
+extern_st(st_clump);
+#define public_st_clump() /* in ialloc.c */\
+ gs_public_st_ptrs3(st_clump, clump_t, "clump_t",\
+ clump_enum_ptrs, clump_reloc_ptrs, left, right, parent)
/*
- * Macros for scanning a chunk linearly, with the following schema:
- * SCAN_CHUNK_OBJECTS(cp) << declares pre, size >>
+ * Macros for scanning a clump linearly, with the following schema:
+ * SCAN_CLUMP_OBJECTS(cp) << declares pre, size >>
* << code for all objects -- size not set yet >>
* DO_ALL
* << code for all objects -- size is set >>
@@ -203,7 +203,7 @@ extern_st(st_chunk);
*
* NB on error END_OBJECTS_SCAN calls gs_abort in debug systems.
*/
-#define SCAN_CHUNK_OBJECTS(cp)\
+#define SCAN_CLUMP_OBJECTS(cp)\
{ obj_header_t *pre = (obj_header_t *)((cp)->cbase);\
obj_header_t *end = (obj_header_t *)((cp)->cbot);\
uint size;\
@@ -224,7 +224,7 @@ extern_st(st_chunk);
}\
}\
if ( pre != end )\
- { lprintf2("Chunk parsing error, 0x%lx != 0x%lx\n",\
+ { lprintf2("Clump parsing error, 0x%lx != 0x%lx\n",\
(ulong)pre, (ulong)end);\
/*gs_abort((const gs_memory_t *)NULL);*/ \
}\
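
A usage sketch of the schema, following the shape of consolidate_clump_free() in base/gsalloc.c ('pre' and 'size' are declared by the macros themselves):

    SCAN_CLUMP_OBJECTS(cp)
        DO_ALL
            if (pre->o_type == &st_free) {
                /* a free object of rounded size 'size' starts at pre */
            }
    END_OBJECTS_SCAN
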
@@ -233,59 +233,59 @@ extern_st(st_chunk);
# define END_OBJECTS_SCAN END_OBJECTS_SCAN_NO_ABORT
#endif
-/* Initialize a chunk. */
+/* Initialize a clump. */
/* This is exported for save/restore. */
-void alloc_init_chunk(chunk_t *, byte *, byte *, bool, chunk_t *);
+void alloc_init_clump(clump_t *, byte *, byte *, bool, clump_t *);
-/* Initialize the string freelists in a chunk. */
-void alloc_init_free_strings(chunk_t *);
+/* Initialize the string freelists in a clump. */
+void alloc_init_free_strings(clump_t *);
-/* Find the chunk for a pointer. */
-/* Note that ptr_is_within_chunk returns true even if the pointer */
-/* is in an inner chunk of the chunk being tested. */
-#define ptr_is_within_chunk(ptr, cp)\
+/* Find the clump for a pointer. */
+/* Note that ptr_is_within_clump returns true even if the pointer */
+/* is in an inner clump of the clump being tested. */
+#define ptr_is_within_clump(ptr, cp)\
PTR_BETWEEN((const byte *)(ptr), (cp)->cbase, (cp)->cend)
-#define ptr_is_in_inner_chunk(ptr, cp)\
+#define ptr_is_in_inner_clump(ptr, cp)\
((cp)->inner_count != 0 &&\
PTR_BETWEEN((const byte *)(ptr), (cp)->cbot, (cp)->ctop))
-#define ptr_is_in_chunk(ptr, cp)\
- (ptr_is_within_chunk(ptr, cp) && !ptr_is_in_inner_chunk(ptr, cp))
-typedef struct chunk_locator_s {
+#define ptr_is_in_clump(ptr, cp)\
+ (ptr_is_within_clump(ptr, cp) && !ptr_is_in_inner_clump(ptr, cp))
+typedef struct clump_locator_s {
gs_ref_memory_t *memory; /* for head & tail of chain */
- chunk_t *cp; /* one-element cache */
-} chunk_locator_t;
-bool chunk_locate_ptr(const void *, chunk_locator_t *);
+ clump_t *cp; /* one-element cache */
+} clump_locator_t;
+bool clump_locate_ptr(const void *, clump_locator_t *);
-#define chunk_locate(ptr, clp)\
- (((clp)->cp != 0 && ptr_is_in_chunk(ptr, (clp)->cp)) ||\
- chunk_locate_ptr(ptr, clp))
+#define clump_locate(ptr, clp)\
+ (((clp)->cp != 0 && ptr_is_in_clump(ptr, (clp)->cp)) ||\
+ clump_locate_ptr(ptr, clp))
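
A sketch of the locate idiom as used by i_free_object() in base/gsalloc.c: the locator is a one-element cache, so clump_locate() only falls back to the splay-tree search on a miss.

    clump_locator_t cl;

    cl.memory = imem;
    cl.cp = 0;                /* empty cache: forces a real search */
    if (clump_locate(ptr, &cl)) {
        /* cl.cp is now the clump containing ptr */
    }
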
-/* Close up the current chunk. */
+/* Close up the current clump. */
/* This is exported for save/restore and for the GC. */
-void alloc_close_chunk(gs_ref_memory_t * mem);
+void alloc_close_clump(gs_ref_memory_t * mem);
-/* Reopen the current chunk after a GC. */
-void alloc_open_chunk(gs_ref_memory_t * mem);
+/* Reopen the current clump after a GC. */
+void alloc_open_clump(gs_ref_memory_t * mem);
-/* Insert or remove a chunk in the address-ordered chain. */
+/* Insert or remove a clump in the address-ordered chain. */
/* These are exported for the GC. */
-void alloc_link_chunk(chunk_t *, gs_ref_memory_t *);
-void alloc_unlink_chunk(chunk_t *, gs_ref_memory_t *);
+void alloc_link_clump(clump_t *, gs_ref_memory_t *);
+void alloc_unlink_clump(clump_t *, gs_ref_memory_t *);
-/* Free a chunk. This is exported for save/restore and for the GC. */
-void alloc_free_chunk(chunk_t *, gs_ref_memory_t *);
+/* Free a clump. This is exported for save/restore and for the GC. */
+void alloc_free_clump(clump_t *, gs_ref_memory_t *);
-/* Print a chunk debugging message. */
+/* Print a clump debugging message. */
/* Unfortunately, the ANSI C preprocessor doesn't allow us to */
/* define the list of variables being printed as a macro. */
-#define dprintf_chunk_format\
+#define dprintf_clump_format\
"%s 0x%lx (0x%lx..0x%lx, 0x%lx..0x%lx..0x%lx)\n"
-#define dmprintf_chunk(mem, msg, cp)\
- dmprintf7(mem, dprintf_chunk_format,\
+#define dmprintf_clump(mem, msg, cp)\
+ dmprintf7(mem, dprintf_clump_format,\
msg, (ulong)(cp), (ulong)(cp)->cbase, (ulong)(cp)->cbot,\
(ulong)(cp)->ctop, (ulong)(cp)->climit, (ulong)(cp)->cend)
-#define if_debug_chunk(c, mem, msg, cp)\
- if_debug7m(c, mem,dprintf_chunk_format,\
+#define if_debug_clump(c, mem, msg, cp)\
+ if_debug7m(c, mem,dprintf_clump_format,\
msg, (ulong)(cp), (ulong)(cp)->cbase, (ulong)(cp)->cbot,\
(ulong)(cp)->ctop, (ulong)(cp)->climit, (ulong)(cp)->cend)
@@ -334,9 +334,9 @@ typedef struct ref_s ref;
struct gs_ref_memory_s {
/* The following are set at initialization time. */
gs_memory_common;
- uint chunk_size;
+ uint clump_size;
uint large_size; /* min size to give large object */
- /* its own chunk: must be */
+ /* its own clump: must be */
/* 1 mod obj_align_mod */
uint space; /* a_local, a_global, a_system */
# if IGC_PTR_STABILITY_CHECK
@@ -347,14 +347,14 @@ struct gs_ref_memory_s {
gs_memory_gc_status_t gc_status;
/* The following are updated dynamically. */
bool is_controlled; /* if true, this allocator doesn't manage */
- /* its own chunks */
+ /* its own clumps */
ulong limit; /* signal a VMerror when total */
/* allocated exceeds this */
- chunk_t *root; /* root of chunk splay tree */
- chunk_t cc; /* current chunk */
- chunk_t *pcc; /* where to store cc */
- chunk_locator_t cfreed; /* chunk where last object freed */
- ulong allocated; /* total size of all chunks */
+ clump_t *root; /* root of clump splay tree */
+ clump_t cc; /* current clump */
+ clump_t *pcc; /* where to store cc */
+ clump_locator_t cfreed; /* clump where last object freed */
+ ulong allocated; /* total size of all clumps */
/* allocated at this save level */
ulong gc_allocated; /* value of (allocated + */
/* previous_status.allocated) after last GC */
@@ -403,17 +403,17 @@ extern_st(st_ref_memory);
extern const gs_memory_procs_t gs_ref_memory_procs;
/*
- * Scan the chunks of an allocator:
- * SCAN_MEM_CHUNKS(mem, cp)
- * << code to process chunk cp >>
- * END_CHUNKS_SCAN
+ * Scan the clumps of an allocator:
+ * SCAN_MEM_CLUMPS(mem, cp)
+ * << code to process clump cp >>
+ * END_CLUMPS_SCAN
*/
-#define SCAN_MEM_CHUNKS(mem, cp)\
- { chunk_splay_walker sw;\
- chunk_t *cp;\
- for (cp = chunk_splay_walk_init(&sw, mem); cp != 0; cp = chunk_splay_walk_fwd(&sw))\
+#define SCAN_MEM_CLUMPS(mem, cp)\
+ { clump_splay_walker sw;\
+ clump_t *cp;\
+ for (cp = clump_splay_walk_init(&sw, mem); cp != 0; cp = clump_splay_walk_fwd(&sw))\
{
-#define END_CHUNKS_SCAN\
+#define END_CLUMPS_SCAN\
}\
}
@@ -455,12 +455,12 @@ extern const dump_control_t dump_control_all;
/* contents. */
void debug_print_object(const gs_memory_t *mem, const void *obj, const dump_control_t * control);
-/* Print the contents of a chunk with the given options. */
+/* Print the contents of a clump with the given options. */
/* Relevant options: all. */
-void debug_dump_chunk(const gs_memory_t *mem, const chunk_t * cp, const dump_control_t * control);
-void debug_print_chunk(const gs_memory_t *mem, const chunk_t * cp); /* default options */
+void debug_dump_clump(const gs_memory_t *mem, const clump_t * cp, const dump_control_t * control);
+void debug_print_clump(const gs_memory_t *mem, const clump_t * cp); /* default options */
-/* Print the contents of all chunks managed by an allocator. */
+/* Print the contents of all clumps managed by an allocator. */
/* Relevant options: all. */
void debug_dump_memory(const gs_ref_memory_t *mem,
const dump_control_t *control);
@@ -474,26 +474,26 @@ void debug_find_pointers(const gs_ref_memory_t *mem, const void *target);
#endif /* DEBUG */
-/* Routines for walking/manipulating the splay tree of chunks */
+/* Routines for walking/manipulating the splay tree of clumps */
enum {
SPLAY_APP_CONTINUE = 0,
SPLAY_APP_STOP = 1
};
-chunk_t *chunk_splay_app(chunk_t *root, gs_ref_memory_t *imem, int (*fn)(chunk_t *, void *), void *arg);
+clump_t *clump_splay_app(clump_t *root, gs_ref_memory_t *imem, int (*fn)(clump_t *, void *), void *arg);
typedef struct
{
int from;
- chunk_t *cp;
-} chunk_splay_walker;
+ clump_t *cp;
+} clump_splay_walker;
-chunk_t *chunk_splay_walk_bwd(chunk_splay_walker *sw);
+clump_t *clump_splay_walk_bwd(clump_splay_walker *sw);
-chunk_t *chunk_splay_walk_fwd(chunk_splay_walker *sw);
+clump_t *clump_splay_walk_fwd(clump_splay_walker *sw);
-chunk_t *chunk_splay_walk_init(chunk_splay_walker *sw, const gs_ref_memory_t *imem);
+clump_t *clump_splay_walk_init(clump_splay_walker *sw, const gs_ref_memory_t *imem);
-chunk_t *chunk_splay_walk_init_mid(chunk_splay_walker *sw, chunk_t *cp);
+clump_t *clump_splay_walk_init_mid(clump_splay_walker *sw, clump_t *cp);
#endif /* gxalloc_INCLUDED */
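The callback flavour, clump_splay_app, is used the same way free_if_empty uses it in psi/igc.c below; a hypothetical counter as a sketch:

static int
count_one_clump(clump_t *cp, void *arg)
{
    (void)cp;                   /* only the count matters here */
    ++*(int *)arg;
    return SPLAY_APP_CONTINUE;  /* visit every node */
}

static int
count_clumps(gs_ref_memory_t *imem)
{
    int n = 0;

    clump_splay_app(imem->root, imem, count_one_clump, &n);
    return n;
}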
diff --git a/base/gxobj.h b/base/gxobj.h
index 86599ad22..302162c96 100644
--- a/base/gxobj.h
+++ b/base/gxobj.h
@@ -199,9 +199,9 @@ struct obj_header_s { /* must be a struct because of forward reference */
/*
* Define the header that free objects point back to when relocating.
- * Every chunk, including inner chunks, has one of these.
+ * Every clump, including inner clumps, has one of these.
*/
-typedef struct chunk_head_s {
+typedef struct clump_head_s {
byte *dest; /* destination for objects */
#if obj_align_mod > ARCH_SIZEOF_PTR
byte *_pad[obj_align_mod / ARCH_SIZEOF_PTR - 1];
@@ -209,6 +209,6 @@ typedef struct chunk_head_s {
obj_header_t free; /* header for a free object, */
/* in case the first real object */
/* is in use */
-} chunk_head_t;
+} clump_head_t;
#endif /* gxobj_INCLUDED */
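The GC's free-object test is built on this header: a free object's back pointer leads to the clump_head_t, a live object's does not (gc_do_reloc and gc_objects_compact below both test exactly this). As a sketch, with a hypothetical name:

static bool
obj_is_free(const obj_header_t *pre, const clump_head_t *chead)
{
    /* Compare the scaled back pointer with the object's actual
     * offset from the clump head. */
    return (pre->o_back << obj_back_shift) ==
           ((const byte *)pre - (const byte *)chead);
}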
diff --git a/doc/Develop.htm b/doc/Develop.htm
index 786bf2721..b1900a467 100644
--- a/doc/Develop.htm
+++ b/doc/Develop.htm
@@ -3787,15 +3787,15 @@ Files:
<p>
The standard Ghostscript allocator gets storage from its parent (normally
the <code>malloc</code> allocator) in large blocks called
-<em>chunks</em>, and then allocates objects up from the low end and strings
+<em>clumps</em>, and then allocates objects up from the low end and strings
down from the high end. Large objects or strings are allocated in their own
-chunk.
+clump.
<p>
The standard allocator maintains a set of free-block lists for small object
sizes, one list per size (rounded up to the word size), plus a free-block
list for large objects (but not for objects so large that they get their own
-chunk: when such an object is freed, its chunk is returned to the parent).
+clump: when such an object is freed, its clump is returned to the parent).
The lists are not sorted; adjacent blocks are only merged if needed.
<p>
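Stated with the clump fields this patch renames throughout (cbase, cbot, ctop, climit): objects grow cbot upward from cbase, strings grow ctop downward from climit, and the free middle is what remains. A sketch of the resulting invariant (hypothetical helper):

static bool
clump_layout_ok(const clump_t *cp)
{
    return cp->cbase <= cp->cbot     /* objects allocated so far */
        && cp->cbot  <= cp->ctop     /* free middle region */
        && cp->ctop  <= cp->climit;  /* strings allocated so far */
}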
@@ -3875,7 +3875,7 @@ Using a separate memory manager object for each composite object would waste
a lot of space for object headers. Therefore, the interpreter's memory
manager packs multiple composite objects (also called "ref-containing
objects") into a single memory manager object, similar to the way the memory
-manager packs multiple objects into a chunk (see <a
+manager packs multiple objects into a clump (see <a
href="#Standard_implementation">above</a>). See <a
href="../base/gxalloc.h">base/gxalloc.h</a> for details. This memory manager
object has a structure descriptor, like all other memory manager objects.
diff --git a/doc/Use.htm b/doc/Use.htm
index c5f9a8a1e..3d43930dd 100644
--- a/doc/Use.htm
+++ b/doc/Use.htm
@@ -3692,7 +3692,7 @@ usage.
<dt>&nbsp;&nbsp;&nbsp;<code>3</code><dd>curve subdivider/rasterizer, detail
<dt><code>4</code><dd>garbage collector (strings)
<dt>&nbsp;&nbsp;&nbsp;<code>5</code><dd>garbage collector (strings, detail)
-<dt><code>6</code><dd>garbage collector (chunks, roots)
+<dt><code>6</code><dd>garbage collector (clumps, roots)
<dt>&nbsp;&nbsp;&nbsp;<code>7</code><dd>garbage collector (objects)
<dt>&nbsp;&nbsp;&nbsp;<code>8</code><dd>garbage collector (refs)
<dt>&nbsp;&nbsp;&nbsp;<code>9</code><dd>garbage collector (pointers)
diff --git a/psi/ialloc.c b/psi/ialloc.c
index 6e6002580..ef1e8d34a 100644
--- a/psi/ialloc.c
+++ b/psi/ialloc.c
@@ -186,7 +186,7 @@ gs_alloc_ref_array(gs_ref_memory_t * mem, ref * parr, uint attrs,
* - Large chunk: pcc unchanged, end != cc.cbot.
* - New chunk: pcc changed.
*/
- chunk_t *pcc = mem->pcc;
+ clump_t *pcc = mem->pcc;
ref *end;
alloc_change_t *cp = 0;
int code = 0;
@@ -213,11 +213,11 @@ gs_alloc_ref_array(gs_ref_memory_t * mem, ref * parr, uint attrs,
/* Large chunk. */
/* This happens only for very large arrays, */
/* so it doesn't need to be cheap. */
- chunk_locator_t cl;
+ clump_locator_t cl;
cl.memory = mem;
cl.cp = mem->root;
- chunk_locate_ptr(obj, &cl);
+ clump_locate_ptr(obj, &cl);
cl.cp->has_refs = true;
}
if (cp) {
@@ -309,11 +309,11 @@ gs_free_ref_array(gs_ref_memory_t * mem, ref * parr, client_name_t cname)
/* See if this array has a chunk all to itself. */
/* We only make this check when freeing very large objects, */
/* so it doesn't need to be cheap. */
- chunk_locator_t cl;
+ clump_locator_t cl;
cl.memory = mem;
cl.cp = mem->root;
- if (chunk_locate_ptr(obj, &cl) &&
+ if (clump_locate_ptr(obj, &cl) &&
obj == (ref *) ((obj_header_t *) (cl.cp->cbase) + 1) &&
(byte *) (obj + (num_refs + 1)) == cl.cp->cend
) {
@@ -323,7 +323,7 @@ gs_free_ref_array(gs_ref_memory_t * mem, ref * parr, client_name_t cname)
num_refs, (ulong) obj);
if ((gs_memory_t *)mem != mem->stable_memory)
alloc_save_remove(mem, (ref_packed *)obj, "gs_free_ref_array");
- alloc_free_chunk(cl.cp, mem);
+ alloc_free_clump(cl.cp, mem);
return;
}
}
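Both call sites above follow the same locator idiom: seed a clump_locator_t with the allocator and its splay root, then probe. A condensed sketch (hypothetical wrapper):

static bool
allocator_owns_ptr(gs_ref_memory_t *mem, const void *ptr)
{
    clump_locator_t cl;

    cl.memory = mem;
    cl.cp = mem->root;
    /* On success, cl.cp is left pointing at the containing clump. */
    return clump_locate_ptr(ptr, &cl);
}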
diff --git a/psi/igc.c b/psi/igc.c
index 04b57dd3f..c36bb3e8b 100644
--- a/psi/igc.c
+++ b/psi/igc.c
@@ -67,17 +67,17 @@ struct gc_mark_stack_s {
/* Forward references */
static void gc_init_mark_stack(gc_mark_stack *, uint);
-static void gc_objects_clear_marks(const gs_memory_t *mem, chunk_t *);
+static void gc_objects_clear_marks(const gs_memory_t *mem, clump_t *);
static void gc_unmark_names(name_table *, op_array_table *, op_array_table *);
static int gc_trace(gs_gc_root_t *, gc_state_t *, gc_mark_stack *);
-static int gc_rescan_chunk(chunk_t *, gc_state_t *, gc_mark_stack *);
-static int gc_trace_chunk(const gs_memory_t *mem, chunk_t *, gc_state_t *, gc_mark_stack *);
+static int gc_rescan_clump(clump_t *, gc_state_t *, gc_mark_stack *);
+static int gc_trace_clump(const gs_memory_t *mem, clump_t *, gc_state_t *, gc_mark_stack *);
static bool gc_trace_finish(gc_state_t *);
-static void gc_clear_reloc(chunk_t *);
-static void gc_objects_set_reloc(gc_state_t * gcst, chunk_t *);
-static void gc_do_reloc(chunk_t *, gs_ref_memory_t *, gc_state_t *);
-static void gc_objects_compact(chunk_t *, gc_state_t *);
-static void gc_free_empty_chunks(gs_ref_memory_t *);
+static void gc_clear_reloc(clump_t *);
+static void gc_objects_set_reloc(gc_state_t * gcst, clump_t *);
+static void gc_do_reloc(clump_t *, gs_ref_memory_t *, gc_state_t *);
+static void gc_objects_compact(clump_t *, gc_state_t *);
+static void gc_free_empty_clumps(gs_ref_memory_t *);
/* Forward references for pointer types */
static ptr_proc_unmark(ptr_struct_unmark);
@@ -167,7 +167,7 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
int min_collect_vm_space; /* min VM space to collect */
int ispace;
gs_ref_memory_t *mem;
- chunk_t *cp;
+ clump_t *cp;
gs_gc_root_t *rp;
gc_state_t state;
struct _msd {
@@ -176,7 +176,7 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
} ms_default;
gc_mark_stack *mark_stack = &ms_default.stack;
const gs_memory_t *cmem;
- chunk_splay_walker sw;
+ clump_splay_walker sw;
/* Optionally force global GC for debugging. */
@@ -211,14 +211,14 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
for (i = min_collect; i <= max_trace; ++i)
#define for_space_mems(i, mem)\
for (mem = space_memories[i]; mem != 0; mem = &mem->saved->state)
-#define for_mem_chunks(mem, cp, sw)\
- for (cp = chunk_splay_walk_init(sw, mem); cp != 0; cp = chunk_splay_walk_fwd(sw))
-#define for_space_chunks(i, mem, cp, sw)\
- for_space_mems(i, mem) for_mem_chunks(mem, cp, sw)
-#define for_chunks(i, n, mem, cp, sw)\
- for_spaces(i, n) for_space_chunks(i, mem, cp, sw)
-#define for_collected_chunks(i, mem, cp, sw)\
- for_collected_spaces(i) for_space_chunks(i, mem, cp, sw)
+#define for_mem_clumps(mem, cp, sw)\
+ for (cp = clump_splay_walk_init(sw, mem); cp != 0; cp = clump_splay_walk_fwd(sw))
+#define for_space_clumps(i, mem, cp, sw)\
+ for_space_mems(i, mem) for_mem_clumps(mem, cp, sw)
+#define for_clumps(i, n, mem, cp, sw)\
+ for_spaces(i, n) for_space_clumps(i, mem, cp, sw)
+#define for_collected_clumps(i, mem, cp, sw)\
+ for_collected_spaces(i) for_space_clumps(i, mem, cp, sw)
#define for_roots(i, n, mem, rp)\
for_spaces(i, n)\
for (mem = space_memories[i], rp = mem->roots; rp != 0; rp = rp->next)
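These loop macros nest, so for_clumps unrolls to three levels; a comment-only sketch (space_memories is local to gs_gc_reclaim):

/*
 * for_clumps(i, n, mem, cp, &sw) walks:
 *   for_spaces(i, n)                  -- each VM space index
 *     for_space_mems(i, mem)          -- each allocator on that
 *                                        space's save chain
 *       for_mem_clumps(mem, cp, &sw)  -- each clump, via the
 *                                        splay-tree walker
 */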
@@ -263,15 +263,15 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
/* Clear marks in spaces to be collected. */
for_collected_spaces(ispace)
- for_space_chunks(ispace, mem, cp, &sw) {
+ for_space_clumps(ispace, mem, cp, &sw) {
gc_objects_clear_marks((const gs_memory_t *)mem, cp);
gc_strings_set_marks(cp, false);
}
- end_phase(state.heap,"clear chunk marks");
+ end_phase(state.heap,"clear clump marks");
/* Clear the marks of roots. We must do this explicitly, */
- /* since some roots are not in any chunk. */
+ /* since some roots are not in any clump. */
for_roots(ispace, max_trace, mem, rp) {
enum_ptr_t eptr;
@@ -301,7 +301,7 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
{
gc_mark_stack *end = mark_stack;
- for_chunks(ispace, max_trace, mem, cp, &sw) {
+ for_clumps(ispace, max_trace, mem, cp, &sw) {
uint avail = cp->ctop - cp->cbot;
if (avail >= sizeof(gc_mark_stack) + sizeof(ms_entry) *
@@ -338,18 +338,18 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
end_phase(state.heap,"mark");
- /* If this is a local GC, mark from non-local chunks. */
+ /* If this is a local GC, mark from non-local clumps. */
if (!global)
- for_chunks(ispace, min_collect - 1, mem, cp, &sw)
- more |= gc_trace_chunk((const gs_memory_t *)mem, cp, &state, mark_stack);
+ for_clumps(ispace, min_collect - 1, mem, cp, &sw)
+ more |= gc_trace_clump((const gs_memory_t *)mem, cp, &state, mark_stack);
/* Handle mark stack overflow. */
while (more < 0) { /* stack overflowed */
more = 0;
- for_chunks(ispace, max_trace, mem, cp, &sw)
- more |= gc_rescan_chunk(cp, &state, mark_stack);
+ for_clumps(ispace, max_trace, mem, cp, &sw)
+ more |= gc_rescan_clump(cp, &state, mark_stack);
}
end_phase(state.heap,"mark overflow");
@@ -398,17 +398,17 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
/* We have to clear the marks first, because we want the */
/* relocation to wind up as o_untraced, not o_unmarked. */
- for_chunks(ispace, min_collect - 1, mem, cp, &sw)
+ for_clumps(ispace, min_collect - 1, mem, cp, &sw)
gc_objects_clear_marks((const gs_memory_t *)mem, cp);
end_phase(state.heap,"post-clear marks");
- for_chunks(ispace, min_collect - 1, mem, cp, &sw)
+ for_clumps(ispace, min_collect - 1, mem, cp, &sw)
gc_clear_reloc(cp);
end_phase(state.heap,"clear reloc");
- /* Set the relocation of roots outside any chunk to o_untraced, */
+ /* Set the relocation of roots outside any clump to o_untraced, */
/* so we won't try to relocate pointers to them. */
/* (Currently, there aren't any.) */
@@ -425,7 +425,7 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
/* we are going to compact. Also finalize freed objects. */
state.cur_mem = (gs_memory_t *)mem;
- for_collected_chunks(ispace, mem, cp, &sw) {
+ for_collected_clumps(ispace, mem, cp, &sw) {
gc_objects_set_reloc(&state, cp);
gc_strings_set_reloc(cp);
}
@@ -443,13 +443,13 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
/* Relocate pointers. */
state.relocating_untraced = true;
- for_chunks(ispace, min_collect - 1, mem, cp, &sw)
+ for_clumps(ispace, min_collect - 1, mem, cp, &sw)
gc_do_reloc(cp, mem, &state);
state.relocating_untraced = false;
- for_collected_chunks(ispace, mem, cp, &sw)
+ for_collected_clumps(ispace, mem, cp, &sw)
gc_do_reloc(cp, mem, &state);
- end_phase(state.heap,"relocate chunks");
+ end_phase(state.heap,"relocate clumps");
for_roots(ispace, max_trace, mem, rp) {
if_debug3m('6', (const gs_memory_t *)mem,
@@ -474,11 +474,11 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
for_collected_spaces(ispace) {
for_space_mems(ispace, mem) {
- for_mem_chunks(mem, cp, &sw) {
- if_debug_chunk('6', (const gs_memory_t *)mem, "[6]compacting chunk", cp);
+ for_mem_clumps(mem, cp, &sw) {
+ if_debug_clump('6', (const gs_memory_t *)mem, "[6]compacting clump", cp);
gc_objects_compact(cp, &state);
gc_strings_compact(cp, cmem);
- if_debug_chunk('6', (const gs_memory_t *)mem, "[6]after compaction:", cp);
+ if_debug_clump('6', (const gs_memory_t *)mem, "[6]after compaction:", cp);
if (mem->pcc == cp)
mem->cc = *cp;
}
@@ -489,18 +489,18 @@ gs_gc_reclaim(vm_spaces * pspaces, bool global)
end_phase(state.heap,"compact");
- /* Free empty chunks. */
+ /* Free empty clumps. */
for_collected_spaces(ispace) {
for_space_mems(ispace, mem) {
- gc_free_empty_chunks(mem);
+ gc_free_empty_clumps(mem);
}
}
- end_phase(state.heap,"free empty chunks");
+ end_phase(state.heap,"free empty clumps");
/*
- * Update previous_status to reflect any freed chunks,
+ * Update previous_status to reflect any freed clumps,
* and set inherited to the negative of allocated,
* so it has no effect. We must update previous_status by
* working back-to-front along the save chain, using pointer reversal.
@@ -602,12 +602,12 @@ ptr_name_index_unmark(enum_ptr_t *pep, gc_state_t * gcst)
/* Do nothing */
}
-/* Unmark the objects in a chunk. */
+/* Unmark the objects in a clump. */
static void
-gc_objects_clear_marks(const gs_memory_t *mem, chunk_t * cp)
+gc_objects_clear_marks(const gs_memory_t *mem, clump_t * cp)
{
- if_debug_chunk('6', mem, "[6]unmarking chunk", cp);
- SCAN_CHUNK_OBJECTS(cp)
+ if_debug_clump('6', mem, "[6]unmarking clump", cp);
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
struct_proc_clear_marks((*proc)) =
pre->o_type->clear_marks;
@@ -658,10 +658,10 @@ gc_init_mark_stack(gc_mark_stack * pms, uint count)
pms->entries[0].is_refs = false;
}
-/* Mark starting from all marked objects in the interval of a chunk */
+/* Mark starting from all marked objects in the interval of a clump */
/* needing rescanning. */
static int
-gc_rescan_chunk(chunk_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
+gc_rescan_clump(clump_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
{
byte *sbot = cp->rescan_bot;
byte *stop = cp->rescan_top;
@@ -673,10 +673,10 @@ gc_rescan_chunk(chunk_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
if (sbot > stop)
return 0;
root.p = &comp;
- if_debug_chunk('6', mem, "[6]rescanning chunk", cp);
+ if_debug_clump('6', mem, "[6]rescanning clump", cp);
cp->rescan_bot = cp->cend;
cp->rescan_top = cp->cbase;
- SCAN_CHUNK_OBJECTS(cp)
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
if ((byte *) (pre + 1) + size < sbot);
else if ((byte *) (pre + 1) > stop)
@@ -724,11 +724,11 @@ gc_rescan_chunk(chunk_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
return more;
}
-/* Mark starting from all the objects in a chunk. */
+/* Mark starting from all the objects in a clump. */
/* We assume that pstate->min_collect > avm_system, */
/* so we don't have to trace names. */
static int
-gc_trace_chunk(const gs_memory_t *mem, chunk_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
+gc_trace_clump(const gs_memory_t *mem, clump_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
{
gs_gc_root_t root;
void *comp;
@@ -736,8 +736,8 @@ gc_trace_chunk(const gs_memory_t *mem, chunk_t * cp, gc_state_t * pstate, gc_mar
int min_trace = pstate->min_collect;
root.p = &comp;
- if_debug_chunk('6', mem, "[6]marking from chunk", cp);
- SCAN_CHUNK_OBJECTS(cp)
+ if_debug_clump('6', mem, "[6]marking from clump", cp);
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
{
if_debug2m('7', mem, " [7]scanning/marking 0x%lx(%lu)\n",
@@ -1036,7 +1036,7 @@ gc_trace(gs_gc_root_t * rp, gc_state_t * pstate, gc_mark_stack * pmstack)
return new;
}
/* Link to, attempting to allocate if necessary, */
-/* another chunk of mark stack. */
+/* another clump of mark stack. */
static int
gc_extend_stack(gc_mark_stack * pms, gc_state_t * pstate)
{
@@ -1055,7 +1055,7 @@ gc_extend_stack(gc_mark_stack * pms, gc_state_t * pstate)
if (pms->next == 0) { /* The mark stack overflowed. */
ms_entry *sp = pms->entries + pms->count - 1;
byte *cptr = sp->ptr; /* container */
- chunk_t *cp = gc_locate(cptr, pstate);
+ clump_t *cp = gc_locate(cptr, pstate);
int new = 1;
if (cp == 0) { /* We were tracing outside collectible */
@@ -1137,27 +1137,27 @@ gc_trace_finish(gc_state_t * pstate)
/* ------ Relocation planning phase ------ */
-/* Initialize the relocation information in the chunk header. */
+/* Initialize the relocation information in the clump header. */
static void
-gc_init_reloc(chunk_t * cp)
+gc_init_reloc(clump_t * cp)
{
- chunk_head_t *chead = cp->chead;
+ clump_head_t *chead = cp->chead;
chead->dest = cp->cbase;
chead->free.o_back =
- offset_of(chunk_head_t, free) >> obj_back_shift;
+ offset_of(clump_head_t, free) >> obj_back_shift;
chead->free.o_size = sizeof(obj_header_t);
chead->free.o_nreloc = 0;
}
-/* Set marks and clear relocation for chunks that won't be compacted. */
+/* Set marks and clear relocation for clumps that won't be compacted. */
static void
-gc_clear_reloc(chunk_t * cp)
+gc_clear_reloc(clump_t * cp)
{
byte *pfree = (byte *) & cp->chead->free;
gc_init_reloc(cp);
- SCAN_CHUNK_OBJECTS(cp)
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
const struct_shared_procs_t *procs =
pre->o_type->shared;
@@ -1171,18 +1171,18 @@ gc_clear_reloc(chunk_t * cp)
gc_strings_clear_reloc(cp);
}
-/* Set the relocation for the objects in a chunk. */
-/* This will never be called for a chunk with any o_untraced objects. */
+/* Set the relocation for the objects in a clump. */
+/* This will never be called for a clump with any o_untraced objects. */
static void
-gc_objects_set_reloc(gc_state_t * gcst, chunk_t * cp)
+gc_objects_set_reloc(gc_state_t * gcst, clump_t * cp)
{
size_t reloc = 0;
- chunk_head_t *chead = cp->chead;
+ clump_head_t *chead = cp->chead;
byte *pfree = (byte *) & chead->free; /* most recent free object */
- if_debug_chunk('6', gcst->heap, "[6]setting reloc for chunk", cp);
+ if_debug_clump('6', gcst->heap, "[6]setting reloc for clump", cp);
gc_init_reloc(cp);
- SCAN_CHUNK_OBJECTS(cp)
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
struct_proc_finalize((*finalize));
const struct_shared_procs_t *procs =
@@ -1211,21 +1211,21 @@ gc_objects_set_reloc(gc_state_t * gcst, chunk_t * cp)
#ifdef DEBUG
if (reloc != 0) {
if_debug1m('6', gcst->heap, "[6]freed %u", (unsigned int)reloc);
- if_debug_chunk('6', gcst->heap, " in", cp);
+ if_debug_clump('6', gcst->heap, " in", cp);
}
#endif
}
/* ------ Relocation phase ------ */
-/* Relocate the pointers in all the objects in a chunk. */
+/* Relocate the pointers in all the objects in a clump. */
static void
-gc_do_reloc(chunk_t * cp, gs_ref_memory_t * mem, gc_state_t * pstate)
+gc_do_reloc(clump_t * cp, gs_ref_memory_t * mem, gc_state_t * pstate)
{
- chunk_head_t *chead = cp->chead;
+ clump_head_t *chead = cp->chead;
- if_debug_chunk('6', (const gs_memory_t *)mem, "[6]relocating in chunk", cp);
- SCAN_CHUNK_OBJECTS(cp)
+ if_debug_clump('6', (const gs_memory_t *)mem, "[6]relocating in clump", cp);
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
#ifdef DEBUG
pstate->container = cp;
@@ -1233,7 +1233,7 @@ gc_do_reloc(chunk_t * cp, gs_ref_memory_t * mem, gc_state_t * pstate)
/* We need to relocate the pointers in an object iff */
/* it is o_untraced, or it is a useful object. */
/* An object is free iff its back pointer points to */
- /* the chunk_head structure. */
+ /* the clump_head structure. */
if (o_is_untraced(pre) ||
pre->o_back << obj_back_shift != (byte *) pre - (byte *) chead
) {
@@ -1285,7 +1285,7 @@ igc_reloc_struct_ptr(const void /*obj_header_t */ *obj, gc_state_t * gcst)
else {
#ifdef DEBUG
/* Do some sanity checking. */
- chunk_t *cp = gcst->container;
+ clump_t *cp = gcst->container;
    if (cp != 0 && cp->cbase <= (byte *)obj && (byte *)obj < cp->ctop) {
if (back > (cp->ctop - cp->cbase) >> obj_back_shift) {
@@ -1294,14 +1294,14 @@ igc_reloc_struct_ptr(const void /*obj_header_t */ *obj, gc_state_t * gcst)
gs_abort(NULL);
}
} else {
- /* Pointed to unknown chunk. Can't check it, sorry. */
+ /* Pointed to unknown clump. Can't check it, sorry. */
}
#endif
{
const obj_header_t *pfree = (const obj_header_t *)
((const char *)(optr - 1) -
(back << obj_back_shift));
- const chunk_head_t *chead = (const chunk_head_t *)
+ const clump_head_t *chead = (const clump_head_t *)
((const char *)pfree -
(pfree->o_back << obj_back_shift));
@@ -1322,19 +1322,19 @@ igc_reloc_struct_ptr(const void /*obj_header_t */ *obj, gc_state_t * gcst)
/* ------ Compaction phase ------ */
-/* Compact the objects in a chunk. */
-/* This will never be called for a chunk with any o_untraced objects. */
+/* Compact the objects in a clump. */
+/* This will never be called for a clump with any o_untraced objects. */
static void
-gc_objects_compact(chunk_t * cp, gc_state_t * gcst)
+gc_objects_compact(clump_t * cp, gc_state_t * gcst)
{
- chunk_head_t *chead = cp->chead;
+ clump_head_t *chead = cp->chead;
obj_header_t *dpre = (obj_header_t *) chead->dest;
const gs_memory_t *cmem = gcst->spaces.memories.named.system->stable_memory;
- SCAN_CHUNK_OBJECTS(cp)
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
/* An object is free iff its back pointer points to */
- /* the chunk_head structure. */
+ /* the clump_head structure. */
if (pre->o_back << obj_back_shift != (byte *) pre - (byte *) chead) {
const struct_shared_procs_t *procs = pre->o_type->shared;
@@ -1354,7 +1354,7 @@ gc_objects_compact(chunk_t * cp, gc_state_t * gcst)
}
END_OBJECTS_SCAN
if (cp->outer == 0 && chead->dest != cp->cbase)
- dpre = (obj_header_t *) cp->cbase; /* compacted this chunk into another */
+ dpre = (obj_header_t *) cp->cbase; /* compacted this clump into another */
gs_alloc_fill(dpre, gs_alloc_fill_collected, cp->cbot - (byte *) dpre);
cp->cbot = (byte *) dpre;
cp->rcur = 0;
@@ -1364,27 +1364,27 @@ gc_objects_compact(chunk_t * cp, gc_state_t * gcst)
/* ------ Cleanup ------ */
static int
-free_if_empty(chunk_t *cp, void *arg)
+free_if_empty(clump_t *cp, void *arg)
{
gs_ref_memory_t * mem = (gs_ref_memory_t *)arg;
if (cp->cbot == cp->cbase && cp->ctop == cp->climit &&
cp->outer == 0 && cp->inner_count == 0)
{
- alloc_free_chunk(cp, mem);
+ alloc_free_clump(cp, mem);
if (mem->pcc == cp)
mem->pcc = 0;
}
return SPLAY_APP_CONTINUE;
}
-/* Free empty chunks. */
+/* Free empty clumps. */
static void
-gc_free_empty_chunks(gs_ref_memory_t * mem)
+gc_free_empty_clumps(gs_ref_memory_t * mem)
{
/* NOTE: Not in reverse order any more, so potentially
* not quite as good for crap allocators. */
- chunk_splay_app(mem->root, mem, free_if_empty, mem);
+ clump_splay_app(mem->root, mem, free_if_empty, mem);
}
const gs_memory_t * gcst_get_memory_ptr(gc_state_t *gcst)
diff --git a/psi/igc.h b/psi/igc.h
index a01e73796..807053ccc 100644
--- a/psi/igc.h
+++ b/psi/igc.h
@@ -59,7 +59,7 @@ typedef struct name_table_s name_table;
#endif
struct gc_state_s {
const gc_procs_with_refs_t *procs; /* must be first */
- chunk_locator_t loc;
+ clump_locator_t loc;
vm_spaces spaces;
int min_collect; /* avm_space */
bool relocating_untraced; /* if true, we're relocating */
@@ -68,7 +68,7 @@ struct gc_state_s {
name_table *ntable; /* (implicitly referenced by names) */
gs_memory_t *cur_mem;
#ifdef DEBUG
- chunk_t *container;
+ clump_t *container;
#endif
};
@@ -79,8 +79,8 @@ ptr_proc_mark(ptr_ref_mark);
/* Exported by ilocate.c for igc.c */
void ialloc_validate_memory(const gs_ref_memory_t *, gc_state_t *);
-void ialloc_validate_chunk(const chunk_t *, gc_state_t *);
-void ialloc_validate_object(const obj_header_t *, const chunk_t *,
+void ialloc_validate_clump(const clump_t *, gc_state_t *);
+void ialloc_validate_object(const obj_header_t *, const clump_t *,
gc_state_t *);
/* Exported by igc.c for ilocate.c */
diff --git a/psi/igcstr.c b/psi/igcstr.c
index 1789bebb9..0694c7a98 100644
--- a/psi/igcstr.c
+++ b/psi/igcstr.c
@@ -24,11 +24,11 @@
#include "igc.h"
/* Forward references */
-static bool gc_mark_string(const byte *, uint, bool, const chunk_t *);
+static bool gc_mark_string(const byte *, uint, bool, const clump_t *);
-/* (Un)mark the strings in a chunk. */
+/* (Un)mark the strings in a clump. */
void
-gc_strings_set_marks(chunk_t * cp, bool mark)
+gc_strings_set_marks(clump_t * cp, bool mark)
{
if (cp->smark != 0) {
if_debug3('6', "[6]clearing string marks 0x%lx[%u] to %d\n",
@@ -59,9 +59,9 @@ typedef string_mark_unit bword;
# define bword_swap_bytes(m) DO_NOTHING
#endif
-/* (Un)mark a string in a known chunk. Return true iff any new marks. */
+/* (Un)mark a string in a known clump. Return true iff any new marks. */
static bool
-gc_mark_string(const byte * ptr, uint size, bool set, const chunk_t * cp)
+gc_mark_string(const byte * ptr, uint size, bool set, const clump_t * cp)
{
uint offset = (ptr - HDR_ID_OFFSET) - cp->sbase;
bword *bp = (bword *) (cp->smark + ((offset & -bword_bits) >> 3));
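One smark bit covers one string byte, which is what the offset arithmetic above encodes; a sketch of the corresponding lookup (hypothetical helper; the bword_swap_bytes byte-order fix-up is omitted, and LSB-first bit order is assumed):

static bool
string_byte_marked(const clump_t *cp, const byte *p)
{
    /* p is the raw string byte, already adjusted by HDR_ID_OFFSET
     * as in gc_mark_string above. */
    uint offset = p - cp->sbase;
    const bword *bp = (const bword *)
        (cp->smark + ((offset & -bword_bits) >> 3));

    return ((*bp >> (offset & (bword_bits - 1))) & 1) != 0;
}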
@@ -130,7 +130,7 @@ dmfwrite(const gs_memory_t *mem, const byte *ptr, uint count)
bool
gc_string_mark(const byte * ptr, uint size, bool set, gc_state_t * gcst)
{
- const chunk_t *cp;
+ const clump_t *cp;
bool marks;
if (size == 0)
@@ -138,12 +138,12 @@ gc_string_mark(const byte * ptr, uint size, bool set, gc_state_t * gcst)
#define dmprintstr(mem)\
dmputc(mem, '('); dmfwrite(mem, ptr - HDR_ID_OFFSET, min(size, 20));\
dmputs(mem, (size <= 20 ? ")" : "...)"))
- if (!(cp = gc_locate(ptr - HDR_ID_OFFSET, gcst))) { /* not in a chunk */
+ if (!(cp = gc_locate(ptr - HDR_ID_OFFSET, gcst))) { /* not in a clump */
#ifdef DEBUG
if (gs_debug_c('5')) {
dmlprintf2(gcst->heap, "[5]0x%lx[%u]", (ulong) ptr - HDR_ID_OFFSET, size);
dmprintstr(gcst->heap);
- dmputs(gcst->heap, " not in a chunk\n");
+ dmputs(gcst->heap, " not in a clump\n");
}
#endif
return false;
@@ -156,17 +156,17 @@ gc_string_mark(const byte * ptr, uint size, bool set, gc_state_t * gcst)
(ulong) ptr - HDR_ID_OFFSET, size, (ulong) cp->ctop, (ulong) cp->climit);
return false;
} else if (ptr + size > cp->climit) { /*
- * If this is the bottommost string in a chunk that has
- * an inner chunk, the string's starting address is both
- * cp->ctop of the outer chunk and cp->climit of the inner;
+ * If this is the bottommost string in a clump that has
+ * an inner clump, the string's starting address is both
+ * cp->ctop of the outer clump and cp->climit of the inner;
* gc_locate may incorrectly attribute the string to the
- * inner chunk because of this. This doesn't affect
+ * inner clump because of this. This doesn't affect
* marking or relocation, since the machinery for these
- * is all associated with the outermost chunk,
+ * is all associated with the outermost clump,
* but it can cause the validity check to fail.
* Check for this case now.
*/
- const chunk_t *scp = cp;
+ const clump_t *scp = cp;
while (ptr - HDR_ID_OFFSET == scp->climit && scp->outer != 0)
scp = scp->outer;
@@ -194,7 +194,7 @@ gc_string_mark(const byte * ptr, uint size, bool set, gc_state_t * gcst)
/* Clear the relocation for strings. */
/* This requires setting the marks. */
void
-gc_strings_clear_reloc(chunk_t * cp)
+gc_strings_clear_reloc(clump_t * cp)
{
if (cp->sreloc != 0) {
gc_strings_set_marks(cp, true);
@@ -218,11 +218,11 @@ static const byte count_zero_bits_table[256] =
#define byte_count_one_bits(byt)\
(uint)(8 - count_zero_bits_table[byt])
-/* Set the relocation for the strings in a chunk. */
+/* Set the relocation for the strings in a clump. */
/* The sreloc table stores the relocated offset from climit for */
/* the beginning of each block of string_data_quantum characters. */
void
-gc_strings_set_reloc(chunk_t * cp)
+gc_strings_set_reloc(clump_t * cp)
{
if (cp->sreloc != 0 && cp->smark != 0) {
byte *bot = cp->ctop;
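The relocated offsets stored in sreloc are accumulated from popcounts over smark; a sketch of that primitive, built only from the table and macro above (hypothetical helper):

/* Count the surviving (marked) string bytes recorded in smark bytes
 * [from, to): one mark bit per string byte. */
static uint
marked_string_bytes(const byte *smark, uint from, uint to)
{
    uint n = 0, i;

    for (i = from; i < to; ++i)
        n += byte_count_one_bits(smark[i]);
    return n;
}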
@@ -280,7 +280,7 @@ void
igc_reloc_string(gs_string * sptr, gc_state_t * gcst)
{
byte *ptr;
- const chunk_t *cp;
+ const clump_t *cp;
uint offset;
uint reloc;
const byte *bitp;
@@ -293,7 +293,7 @@ igc_reloc_string(gs_string * sptr, gc_state_t * gcst)
ptr = sptr->data;
ptr -= HDR_ID_OFFSET;
- if (!(cp = gc_locate(ptr, gcst))) /* not in a chunk */
+ if (!(cp = gc_locate(ptr, gcst))) /* not in a clump */
return;
if (cp->sreloc == 0 || cp->smark == 0) /* not marking strings */
return;
@@ -339,9 +339,9 @@ igc_reloc_param_string(gs_param_string * sptr, gc_state_t * gcst)
}
}
-/* Compact the strings in a chunk. */
+/* Compact the strings in a clump. */
void
-gc_strings_compact(chunk_t * cp, const gs_memory_t *mem)
+gc_strings_compact(clump_t * cp, const gs_memory_t *mem)
{
if (cp->smark != 0) {
byte *hi = cp->climit;
diff --git a/psi/igcstr.h b/psi/igcstr.h
index 4bf0fbc93..c0f14c58d 100644
--- a/psi/igcstr.h
+++ b/psi/igcstr.h
@@ -20,14 +20,14 @@
# define igcstr_INCLUDED
/* Exported by ilocate.c for igcstr.c */
-chunk_t *gc_locate(const void *, gc_state_t *);
+clump_t *gc_locate(const void *, gc_state_t *);
/* Exported by igcstr.c for igc.c */
-void gc_strings_set_marks(chunk_t *, bool);
+void gc_strings_set_marks(clump_t *, bool);
bool gc_string_mark(const byte *, uint, bool, gc_state_t *);
-void gc_strings_clear_reloc(chunk_t *);
-void gc_strings_set_reloc(chunk_t *);
-void gc_strings_compact(chunk_t *, const gs_memory_t *);
+void gc_strings_clear_reloc(clump_t *);
+void gc_strings_set_reloc(clump_t *);
+void gc_strings_compact(clump_t *, const gs_memory_t *);
string_proc_reloc(igc_reloc_string);
const_string_proc_reloc(igc_reloc_const_string);
param_string_proc_reloc(igc_reloc_param_string);
diff --git a/psi/ilocate.c b/psi/ilocate.c
index 8fd3d7262..f5d41a4d6 100644
--- a/psi/ilocate.c
+++ b/psi/ilocate.c
@@ -32,23 +32,23 @@
#include "store.h"
#ifdef DEBUG
-static int do_validate_chunk(const chunk_t * cp, gc_state_t * gcst);
-static int do_validate_object(const obj_header_t * ptr, const chunk_t * cp,
+static int do_validate_clump(const clump_t * cp, gc_state_t * gcst);
+static int do_validate_object(const obj_header_t * ptr, const clump_t * cp,
gc_state_t * gcst);
#endif
/* ================ Locating ================ */
-/* Locate a pointer in the chunks of a space being collected. */
+/* Locate a pointer in the clumps of a space being collected. */
/* This is only used for string garbage collection and for debugging. */
-chunk_t *
+clump_t *
gc_locate(const void *ptr, gc_state_t * gcst)
{
const gs_ref_memory_t *mem;
const gs_ref_memory_t *other;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
mem = gcst->loc.memory;
@@ -62,7 +62,7 @@ gc_locate(const void *ptr, gc_state_t * gcst)
) {
gcst->loc.memory = other;
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
}
@@ -76,13 +76,13 @@ gc_locate(const void *ptr, gc_state_t * gcst)
gcst->loc.memory = other =
(mem->space == avm_local ? gcst->space_global : gcst->space_local);
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
/* Try its stable allocator. */
if (other->stable_memory != (const gs_memory_t *)other) {
gcst->loc.memory = (gs_ref_memory_t *)other->stable_memory;
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
gcst->loc.memory = other;
}
@@ -90,7 +90,7 @@ gc_locate(const void *ptr, gc_state_t * gcst)
while (gcst->loc.memory->saved != 0) {
gcst->loc.memory = &gcst->loc.memory->saved->state;
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
}
}
@@ -103,7 +103,7 @@ gc_locate(const void *ptr, gc_state_t * gcst)
if (mem != gcst->space_system) {
gcst->loc.memory = gcst->space_system;
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
}
@@ -119,7 +119,7 @@ gc_locate(const void *ptr, gc_state_t * gcst)
if (other->stable_memory != (const gs_memory_t *)other) {
gcst->loc.memory = (gs_ref_memory_t *)other->stable_memory;
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
}
gcst->loc.memory = other;
@@ -134,7 +134,7 @@ gc_locate(const void *ptr, gc_state_t * gcst)
for (;;) {
if (gcst->loc.memory != mem) { /* don't do twice */
gcst->loc.cp = 0;
- if (chunk_locate(ptr, &gcst->loc))
+ if (clump_locate(ptr, &gcst->loc))
return gcst->loc.cp;
}
if (gcst->loc.memory->saved == 0)
@@ -155,7 +155,7 @@ gc_locate(const void *ptr, gc_state_t * gcst)
/* Define the structure for temporarily saving allocator state. */
typedef struct alloc_temp_save_s {
- chunk_t cc;
+ clump_t cc;
uint rsize;
ref rlast;
} alloc_temp_save_t;
@@ -163,7 +163,7 @@ typedef struct alloc_temp_save_s {
static void
alloc_temp_save(alloc_temp_save_t *pats, gs_ref_memory_t *mem)
{
- chunk_t *pcc = mem->pcc;
+ clump_t *pcc = mem->pcc;
obj_header_t *rcur = mem->cc.rcur;
if (pcc != 0) {
@@ -182,7 +182,7 @@ alloc_temp_save(alloc_temp_save_t *pats, gs_ref_memory_t *mem)
static void
alloc_temp_restore(alloc_temp_save_t *pats, gs_ref_memory_t *mem)
{
- chunk_t *pcc = mem->pcc;
+ clump_t *pcc = mem->pcc;
obj_header_t *rcur = mem->cc.rcur;
if (rcur != 0) {
@@ -248,15 +248,15 @@ ialloc_validate_memory(const gs_ref_memory_t * mem, gc_state_t * gcst)
for (smem = mem, level = 0; smem != 0;
smem = &smem->saved->state, --level
) {
- chunk_splay_walker sw;
- const chunk_t *cp;
+ clump_splay_walker sw;
+ const clump_t *cp;
int i;
if_debug3m('6', (gs_memory_t *)mem, "[6]validating memory 0x%lx, space %d, level %d\n",
(ulong) mem, mem->space, level);
- /* Validate chunks. */
- for (cp = chunk_splay_walk_init(&sw, smem); cp != 0; cp = chunk_splay_walk_fwd(&sw))
- if (do_validate_chunk(cp, gcst)) {
+ /* Validate clumps. */
+ for (cp = clump_splay_walk_init(&sw, smem); cp != 0; cp = clump_splay_walk_fwd(&sw))
+ if (do_validate_clump(cp, gcst)) {
mlprintf3((gs_memory_t *)mem, "while validating memory 0x%lx, space %d, level %d\n",
(ulong) mem, mem->space, level);
gs_abort(gcst->heap);
@@ -290,13 +290,13 @@ ialloc_validate_memory(const gs_ref_memory_t * mem, gc_state_t * gcst)
/* Check the validity of an object's size. */
static inline bool
-object_size_valid(const obj_header_t * pre, uint size, const chunk_t * cp)
+object_size_valid(const obj_header_t * pre, uint size, const clump_t * cp)
{
return (pre->o_alone ? (const byte *)pre == cp->cbase :
size <= cp->ctop - (const byte *)(pre + 1));
}
-/* Validate all the objects in a chunk. */
+/* Validate all the objects in a clump. */
#if IGC_PTR_STABILITY_CHECK
void ialloc_validate_pointer_stability(const obj_header_t * ptr_from,
const obj_header_t * ptr_to);
@@ -307,21 +307,21 @@ static int ialloc_validate_ref(const ref *, gc_state_t *);
static int ialloc_validate_ref_packed(const ref_packed *, gc_state_t *);
#endif
static int
-do_validate_chunk(const chunk_t * cp, gc_state_t * gcst)
+do_validate_clump(const clump_t * cp, gc_state_t * gcst)
{
int ret = 0;
- if_debug_chunk('6', gcst->heap, "[6]validating chunk", cp);
- SCAN_CHUNK_OBJECTS(cp);
+ if_debug_clump('6', gcst->heap, "[6]validating clump", cp);
+ SCAN_CLUMP_OBJECTS(cp);
DO_ALL
if (pre->o_type == &st_free) {
if (!object_size_valid(pre, size, cp)) {
- lprintf3("Bad free object 0x%lx(%lu), in chunk 0x%lx!\n",
+ lprintf3("Bad free object 0x%lx(%lu), in clump 0x%lx!\n",
(ulong) (pre + 1), (ulong) size, (ulong) cp);
return 1;
}
} else if (do_validate_object(pre + 1, cp, gcst)) {
- dmprintf_chunk(gcst->heap, "while validating chunk", cp);
+ dmprintf_clump(gcst->heap, "while validating clump", cp);
return 1;
}
if_debug3m('7', gcst->heap, " [7]validating %s(%lu) 0x%lx\n",
@@ -341,7 +341,7 @@ do_validate_chunk(const chunk_t * cp, gc_state_t * gcst)
mlprintf3(gcst->heap, "while validating %s(%lu) 0x%lx\n",
struct_type_name_string(pre->o_type),
(ulong) size, (ulong) pre);
- dmprintf_chunk(gcst->heap, "in chunk", cp);
+ dmprintf_clump(gcst->heap, "in clump", cp);
return ret;
}
rp = packed_next(rp);
@@ -372,7 +372,7 @@ do_validate_chunk(const chunk_t * cp, gc_state_t * gcst)
ret = ialloc_validate_ref_packed(eptr.ptr, gcst);
# endif
if (ret) {
- dmprintf_chunk(gcst->heap, "while validating chunk", cp);
+ dmprintf_clump(gcst->heap, "while validating clump", cp);
return ret;
}
}
@@ -382,9 +382,9 @@ do_validate_chunk(const chunk_t * cp, gc_state_t * gcst)
}
void
-ialloc_validate_chunk(const chunk_t * cp, gc_state_t * gcst)
+ialloc_validate_clump(const clump_t * cp, gc_state_t * gcst)
{
- if (do_validate_chunk(cp, gcst))
+ if (do_validate_clump(cp, gcst))
gs_abort(gcst->heap);
}
@@ -476,7 +476,7 @@ cks: if (optr != 0) {
if (r_space(&sref) != avm_foreign &&
!gc_locate(sref.value.const_bytes, gcst)
) {
- lprintf4("At 0x%lx, bad name %u, pname = 0x%lx, string 0x%lx not in any chunk\n",
+ lprintf4("At 0x%lx, bad name %u, pname = 0x%lx, string 0x%lx not in any clump\n",
(ulong) pref, (uint) r_size(pref),
(ulong) pref->value.pname,
(ulong) sref.value.const_bytes);
@@ -486,7 +486,7 @@ cks: if (optr != 0) {
break;
case t_string:
if (r_size(pref) != 0 && !gc_locate(pref->value.bytes, gcst)) {
- lprintf3("At 0x%lx, string ptr 0x%lx[%u] not in any chunk\n",
+ lprintf3("At 0x%lx, string ptr 0x%lx[%u] not in any clump\n",
(ulong) pref, (ulong) pref->value.bytes,
(uint) r_size(pref));
ret = 1;
@@ -499,7 +499,7 @@ cks: if (optr != 0) {
size = r_size(pref);
tname = "array";
cka: if (!gc_locate(rptr, gcst)) {
- lprintf3("At 0x%lx, %s 0x%lx not in any chunk\n",
+ lprintf3("At 0x%lx, %s 0x%lx not in any clump\n",
(ulong) pref, tname, (ulong) rptr);
ret = 1;
break;
@@ -523,7 +523,7 @@ cka: if (!gc_locate(rptr, gcst)) {
break;
optr = pref->value.packed;
if (!gc_locate(optr, gcst)) {
- lprintf2("At 0x%lx, packed array 0x%lx not in any chunk\n",
+ lprintf2("At 0x%lx, packed array 0x%lx not in any clump\n",
(ulong) pref, (ulong) optr);
ret = 1;
}
@@ -575,7 +575,7 @@ ialloc_validate_pointer_stability(const obj_header_t * ptr_fr,
/* Validate an object. */
static int
-do_validate_object(const obj_header_t * ptr, const chunk_t * cp,
+do_validate_object(const obj_header_t * ptr, const clump_t * cp,
gc_state_t * gcst)
{
const obj_header_t *pre = ptr - 1;
@@ -590,13 +590,13 @@ do_validate_object(const obj_header_t * ptr, const chunk_t * cp,
st = *gcst; /* no side effects! */
if (!(cp = gc_locate(pre, &st))) {
- mlprintf1(gcst->heap, "Object 0x%lx not in any chunk!\n",
+ mlprintf1(gcst->heap, "Object 0x%lx not in any clump!\n",
(ulong) ptr);
return 1; /*gs_abort(); */
}
}
if (otype == &st_free) {
- mlprintf3(gcst->heap, "Reference to free object 0x%lx(%lu), in chunk 0x%lx!\n",
+ mlprintf3(gcst->heap, "Reference to free object 0x%lx(%lu), in clump 0x%lx!\n",
(ulong) ptr, (ulong) size, (ulong) cp);
return 1;
}
@@ -608,7 +608,7 @@ do_validate_object(const obj_header_t * ptr, const chunk_t * cp,
) {
mlprintf2(gcst->heap, "Bad object 0x%lx(%lu),\n",
(ulong) ptr, (ulong) size);
- dmprintf2(gcst->heap, " ssize = %u, in chunk 0x%lx!\n",
+ dmprintf2(gcst->heap, " ssize = %u, in clump 0x%lx!\n",
otype->ssize, (ulong) cp);
return 1;
}
@@ -616,7 +616,7 @@ do_validate_object(const obj_header_t * ptr, const chunk_t * cp,
}
void
-ialloc_validate_object(const obj_header_t * ptr, const chunk_t * cp,
+ialloc_validate_object(const obj_header_t * ptr, const clump_t * cp,
gc_state_t * gcst)
{
if (do_validate_object(ptr, cp, gcst))
@@ -635,12 +635,12 @@ ialloc_validate_memory(const gs_ref_memory_t * mem, gc_state_t * gcst)
}
void
-ialloc_validate_chunk(const chunk_t * cp, gc_state_t * gcst)
+ialloc_validate_clump(const clump_t * cp, gc_state_t * gcst)
{
}
void
-ialloc_validate_object(const obj_header_t * ptr, const chunk_t * cp,
+ialloc_validate_object(const obj_header_t * ptr, const clump_t * cp,
gc_state_t * gcst)
{
}
diff --git a/psi/ireclaim.c b/psi/ireclaim.c
index 362d99604..d80523842 100644
--- a/psi/ireclaim.c
+++ b/psi/ireclaim.c
@@ -128,7 +128,7 @@ gs_vmreclaim(gs_dual_memory_t *dmem, bool global)
/****** ABORT IF code < 0 ******/
for (i = nmem; --i >= 0; )
- alloc_close_chunk(memories[i]);
+ alloc_close_clump(memories[i]);
/* Prune the file list so it won't retain potentially collectible */
/* files. */
@@ -172,16 +172,16 @@ gs_vmreclaim(gs_dual_memory_t *dmem, bool global)
dicts_gc_cleanup();
- /* Reopen the active chunks. */
+ /* Reopen the active clumps. */
for (i = 0; i < nmem; ++i)
- alloc_open_chunk(memories[i]);
+ alloc_open_clump(memories[i]);
/* Reload the context state. Note this should be done
- AFTER the chunks are reopened, since the context state
+ AFTER the clumps are reopened, since the context state
load could do allocations that must remain.
- If it were done while the chunks were still closed,
- we would lose those allocations when the chunks were opened */
+ If it were done while the clumps were still closed,
+ we would lose those allocations when the clumps were opened */
code = context_state_load(i_ctx_p);
return code;
diff --git a/psi/isave.c b/psi/isave.c
index 0a39e5af4..f37423e80 100644
--- a/psi/isave.c
+++ b/psi/isave.c
@@ -39,9 +39,9 @@ private_st_alloc_save();
/* see below for details. */
static const long max_repeated_scan = 100000;
-/* Define the minimum space for creating an inner chunk. */
-/* Must be at least sizeof(chunk_head_t). */
-static const long min_inner_chunk_space = sizeof(chunk_head_t) + 500;
+/* Define the minimum space for creating an inner clump. */
+/* Must be at least sizeof(clump_head_t). */
+static const long min_inner_clump_space = sizeof(clump_head_t) + 500;
/*
* The logic for saving and restoring the state is complex.
@@ -51,23 +51,23 @@ static const long min_inner_chunk_space = sizeof(chunk_head_t) + 500;
/*
* To save the state of the memory manager:
- * Save the state of the current chunk in which we are allocating.
- * Shrink all chunks to their inner unallocated region.
+ * Save the state of the current clump in which we are allocating.
+ * Shrink all clumps to their inner unallocated region.
* Save and reset the free block chains.
* By doing this, we guarantee that no object older than the save
* can be freed.
*
* To restore the state of the memory manager:
- * Free all chunks newer than the save, and the descriptors for
- * the inner chunks created by the save.
- * Make current the chunk that was current at the time of the save.
- * Restore the state of the current chunk.
+ * Free all clumps newer than the save, and the descriptors for
+ * the inner clumps created by the save.
+ * Make current the clump that was current at the time of the save.
+ * Restore the state of the current clump.
*
* In addition to save ("start transaction") and restore ("abort transaction"),
 * we support forgetting a save ("commit transaction"). To forget a save:
- * Reassign to the next outer save all chunks newer than the save.
- * Free the descriptors for the inners chunk, updating their outer
- * chunks to reflect additional allocations in the inner chunks.
+ * Reassign to the next outer save all clumps newer than the save.
+ * Free the descriptors for the inner clumps, updating their outer
+ * clumps to reflect additional allocations in the inner clumps.
* Concatenate the free block chains with those of the outer save.
*/
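The three transactions map onto functions later in this file; a comment-only sketch of the correspondence:

/*
 * save    ("start transaction") -> alloc_save_space(): close the
 *         current clump, overlay inner clumps on the unallocated
 *         regions, reset the free block chains.
 * restore ("abort transaction") -> restore_space()/restore_free():
 *         free every clump newer than the save, then reopen the
 *         clump that was current at save time.
 * forget  ("commit transaction") -> combine_space(): relink newer
 *         clumps to the outer save and merge the free block chains.
 */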
@@ -316,7 +316,7 @@ alloc_free_save(gs_ref_memory_t *mem, alloc_save_t *save, const char *scn)
gs_ref_memory_t save_mem;
save_mem = mem->saved->state;
gs_free_object((gs_memory_t *)mem, save, scn);
- /* Free any inner chunk structures. This is the easiest way to do it. */
+ /* Free any inner clump structures. This is the easiest way to do it. */
restore_free(mem);
/* Restore the 'saved' state - this pulls our object off the linked
* list of states. Without this we hit a SEGV in the gc later. */
@@ -403,44 +403,44 @@ alloc_save_space(gs_ref_memory_t * mem, gs_dual_memory_t * dmem, ulong sid)
{
gs_ref_memory_t save_mem;
alloc_save_t *save;
- chunk_t *cp;
- chunk_t *new_pcc = 0;
- chunk_splay_walker sw;
+ clump_t *cp;
+ clump_t *new_pcc = 0;
+ clump_splay_walker sw;
save_mem = *mem;
- alloc_close_chunk(mem);
+ alloc_close_clump(mem);
mem->pcc = 0;
gs_memory_status((gs_memory_t *) mem, &mem->previous_status);
ialloc_reset(mem);
- /* Create inner chunks wherever it's worthwhile. */
+ /* Create inner clumps wherever it's worthwhile. */
- for (cp = chunk_splay_walk_init(&sw, &save_mem); cp != 0; cp = chunk_splay_walk_fwd(&sw)) {
- if (cp->ctop - cp->cbot > min_inner_chunk_space) {
- /* Create an inner chunk to cover only the unallocated part. */
- chunk_t *inner =
- gs_raw_alloc_struct_immovable(mem->non_gc_memory, &st_chunk,
+ for (cp = clump_splay_walk_init(&sw, &save_mem); cp != 0; cp = clump_splay_walk_fwd(&sw)) {
+ if (cp->ctop - cp->cbot > min_inner_clump_space) {
+ /* Create an inner clump to cover only the unallocated part. */
+ clump_t *inner =
+ gs_raw_alloc_struct_immovable(mem->non_gc_memory, &st_clump,
"alloc_save_space(inner)");
if (inner == 0)
break; /* maybe should fail */
- alloc_init_chunk(inner, cp->cbot, cp->ctop, cp->sreloc != 0, cp);
- alloc_link_chunk(inner, mem);
- if_debug2m('u', (gs_memory_t *)mem, "[u]inner chunk: cbot=0x%lx ctop=0x%lx\n",
+ alloc_init_clump(inner, cp->cbot, cp->ctop, cp->sreloc != 0, cp);
+ alloc_link_clump(inner, mem);
+ if_debug2m('u', (gs_memory_t *)mem, "[u]inner clump: cbot=0x%lx ctop=0x%lx\n",
(ulong) inner->cbot, (ulong) inner->ctop);
if (cp == save_mem.pcc)
new_pcc = inner;
}
}
mem->pcc = new_pcc;
- alloc_open_chunk(mem);
+ alloc_open_clump(mem);
save = gs_alloc_struct((gs_memory_t *) mem, alloc_save_t,
&st_alloc_save, "alloc_save_space(save)");
if_debug2m('u', (gs_memory_t *)mem, "[u]save space %u at 0x%lx\n",
mem->space, (ulong) save);
if (save == 0) {
- /* Free the inner chunk structures. This is the easiest way. */
+ /* Free the inner clump structures. This is the easiest way. */
restore_free(mem);
*mem = save_mem;
return 0;
@@ -611,12 +611,12 @@ alloc_save_current(const gs_dual_memory_t * dmem)
bool
alloc_is_since_save(const void *vptr, const alloc_save_t * save)
{
- /* A reference postdates a save iff it is in a chunk allocated */
- /* since the save (including any carried-over inner chunks). */
+ /* A reference postdates a save iff it is in a clump allocated */
+ /* since the save (including any carried-over inner clumps). */
const char *const ptr = (const char *)vptr;
register const gs_ref_memory_t *mem = save->space_local;
- chunk_splay_walker sw;
+ clump_splay_walker sw;
if_debug2m('U', (gs_memory_t *)mem, "[U]is_since_save 0x%lx, 0x%lx:\n",
(ulong) ptr, (ulong) save);
@@ -624,15 +624,15 @@ alloc_is_since_save(const void *vptr, const alloc_save_t * save)
/* alloc_restore_all. */
return true;
}
- /* Check against chunks allocated since the save. */
+ /* Check against clumps allocated since the save. */
/* (There may have been intermediate saves as well.) */
for (;; mem = &mem->saved->state) {
- const chunk_t *cp;
+ const clump_t *cp;
if_debug1m('U', (gs_memory_t *)mem, "[U]checking mem=0x%lx\n", (ulong) mem);
- for (cp = chunk_splay_walk_init(&sw, mem); cp != 0; cp = chunk_splay_walk_fwd(&sw)) {
- if (ptr_is_within_chunk(ptr, cp)) {
- if_debug3m('U', (gs_memory_t *)mem, "[U+]in new chunk 0x%lx: 0x%lx, 0x%lx\n",
+ for (cp = clump_splay_walk_init(&sw, mem); cp != 0; cp = clump_splay_walk_fwd(&sw)) {
+ if (ptr_is_within_clump(ptr, cp)) {
+ if_debug3m('U', (gs_memory_t *)mem, "[U+]in new clump 0x%lx: 0x%lx, 0x%lx\n",
(ulong) cp,
(ulong) cp->cbase, (ulong) cp->cend);
return true;
@@ -656,12 +656,12 @@ alloc_is_since_save(const void *vptr, const alloc_save_t * save)
(mem = save->space_global) != save->space_local &&
save->space_global->num_contexts == 1
) {
- const chunk_t *cp;
+ const clump_t *cp;
if_debug1m('U', (gs_memory_t *)mem, "[U]checking global mem=0x%lx\n", (ulong) mem);
- for (cp = chunk_splay_walk_init(&sw, mem); cp != 0; cp = chunk_splay_walk_fwd(&sw))
- if (ptr_is_within_chunk(ptr, cp)) {
- if_debug3m('U', (gs_memory_t *)mem, "[U+] new chunk 0x%lx: 0x%lx, 0x%lx\n",
+ for (cp = clump_splay_walk_init(&sw, mem); cp != 0; cp = clump_splay_walk_fwd(&sw))
+ if (ptr_is_within_clump(ptr, cp)) {
+ if_debug3m('U', (gs_memory_t *)mem, "[U+] new clump 0x%lx: 0x%lx, 0x%lx\n",
(ulong) cp, (ulong) cp->cbase, (ulong) cp->cend);
return true;
}
@@ -844,7 +844,7 @@ restore_space(gs_ref_memory_t * mem, gs_dual_memory_t *dmem)
}
/* Free memory allocated since the save. */
- /* Note that this frees all chunks except the inner ones */
+ /* Note that this frees all clumps except the inner ones */
/* belonging to this level. */
saved = *save;
restore_free(mem);
@@ -856,7 +856,7 @@ restore_space(gs_ref_memory_t * mem, gs_dual_memory_t *dmem)
*mem = saved.state;
mem->num_contexts = num_contexts;
}
- alloc_open_chunk(mem);
+ alloc_open_clump(mem);
/* Make the allocator current if it was current before the save. */
if (saved.is_current) {
@@ -933,13 +933,13 @@ alloc_restore_all(gs_dual_memory_t * dmem)
static void
restore_finalize(gs_ref_memory_t * mem)
{
- chunk_t *cp;
- chunk_splay_walker sw;
+ clump_t *cp;
+ clump_splay_walker sw;
- alloc_close_chunk(mem);
+ alloc_close_clump(mem);
gs_enable_free((gs_memory_t *) mem, false);
- for (cp = chunk_splay_walk_init(&sw, mem); cp != 0; cp = chunk_splay_walk_bwd(&sw)) {
- SCAN_CHUNK_OBJECTS(cp)
+ for (cp = clump_splay_walk_init(&sw, mem); cp != 0; cp = clump_splay_walk_bwd(&sw)) {
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
struct_proc_finalize((*finalize)) =
pre->o_type->finalize;
@@ -983,7 +983,7 @@ restore_resources(alloc_save_t * sprev, gs_ref_memory_t * mem)
static void
restore_free(gs_ref_memory_t * mem)
{
- /* Free chunks allocated since the save. */
+ /* Free clumps allocated since the save. */
gs_free_all((gs_memory_t *) mem);
}
@@ -1047,31 +1047,31 @@ alloc_forget_save_in(gs_dual_memory_t *dmem, alloc_save_t * save)
while (sprev != save);
return 0;
}
-/* Combine the chunks of the next outer level with those of the current one, */
+/* Combine the clumps of the next outer level with those of the current one, */
/* and free the bookkeeping structures. */
static void
combine_space(gs_ref_memory_t * mem)
{
alloc_save_t *saved = mem->saved;
gs_ref_memory_t *omem = &saved->state;
- chunk_t *cp;
- chunk_splay_walker sw;
+ clump_t *cp;
+ clump_splay_walker sw;
- alloc_close_chunk(mem);
- for (cp = chunk_splay_walk_init(&sw, mem); cp != 0; cp = chunk_splay_walk_fwd(&sw)) {
+ alloc_close_clump(mem);
+ for (cp = clump_splay_walk_init(&sw, mem); cp != 0; cp = clump_splay_walk_fwd(&sw)) {
if (cp->outer == 0)
- alloc_link_chunk(cp, omem);
+ alloc_link_clump(cp, omem);
else {
- chunk_t *outer = cp->outer;
+ clump_t *outer = cp->outer;
outer->inner_count--;
if (mem->pcc == cp)
mem->pcc = outer;
if (mem->cfreed.cp == cp)
mem->cfreed.cp = outer;
- /* "Free" the header of the inner chunk, */
+ /* "Free" the header of the inner clump, */
/* and any immediately preceding gap left by */
- /* the GC having compacted the outer chunk. */
+ /* the GC having compacted the outer clump. */
{
obj_header_t *hp = (obj_header_t *) outer->cbot;
@@ -1086,7 +1086,7 @@ combine_space(gs_ref_memory_t * mem)
hp + 1, "combine_space(header)");
#endif /* **************** */
}
- /* Update the outer chunk's allocation pointers. */
+ /* Update the outer clump's allocation pointers. */
outer->cbot = cp->cbot;
outer->rcur = cp->rcur;
outer->rtop = cp->rtop;
@@ -1125,7 +1125,7 @@ combine_space(gs_ref_memory_t * mem)
mem->largest_free_size = omem->largest_free_size;
}
gs_free_object((gs_memory_t *) mem, saved, "combine_space(saved)");
- alloc_open_chunk(mem);
+ alloc_open_clump(mem);
}
/* Free the changes chain for a level 0 .forgetsave, */
/* resetting the l_new flag in the changed refs. */
@@ -1268,11 +1268,11 @@ save_set_new(gs_ref_memory_t * mem, bool to_new, bool set_limit, ulong *pscanned
return code;
/* Handle newly allocated ref objects. */
- SCAN_MEM_CHUNKS(mem, cp) {
+ SCAN_MEM_CLUMPS(mem, cp) {
if (cp->has_refs) {
bool has_refs = false;
- SCAN_CHUNK_OBJECTS(cp)
+ SCAN_CLUMP_OBJECTS(cp)
DO_ALL
if_debug3m('U', (gs_memory_t *)mem, "[U]set_new scan(0x%lx(%u), %d)\n",
(ulong) pre, size, to_new);
@@ -1291,7 +1291,7 @@ save_set_new(gs_ref_memory_t * mem, bool to_new, bool set_limit, ulong *pscanned
cp->has_refs = has_refs;
}
}
- END_CHUNKS_SCAN
+ END_CLUMPS_SCAN
if_debug2m('u', (gs_memory_t *)mem, "[u]set_new (%s) scanned %ld\n",
(to_new ? "restore" : "save"), scanned);
*pscanned = scanned;
@@ -1305,14 +1305,14 @@ drop_redundant_changes(gs_ref_memory_t * mem)
register alloc_change_t *chp = mem->changes, *chp_back = NULL, *chp_forth;
/* As we are trying to throw away redundant changes in an allocator instance
- that has already been "saved", the active chunk has already been "closed"
+ that has already been "saved", the active clump has already been "closed"
by alloc_save_space(). Using such an allocator (for example, by calling
gs_free_object() with it) can leave it in an unstable state, causing
- problems for the garbage collector (specifically, the chunk validator code).
- So, before we might use it, open the current chunk, and then close it again
+ problems for the garbage collector (specifically, the clump validator code).
+ So, before we might use it, open the current clump, and then close it again
when we're done.
*/
- alloc_open_chunk(mem);
+ alloc_open_clump(mem);
/* First reverse the list and set all. */
for (; chp; chp = chp_forth) {
@@ -1356,7 +1356,7 @@ drop_redundant_changes(gs_ref_memory_t * mem)
}
mem->changes = chp_back;
- alloc_close_chunk(mem);
+ alloc_close_clump(mem);
}
/* Set or reset the l_new attribute on the changes chain. */
diff --git a/psi/zcontext.c b/psi/zcontext.c
index bda4b478d..aaf349e0c 100644
--- a/psi/zcontext.c
+++ b/psi/zcontext.c
@@ -248,7 +248,7 @@ context_reclaim(vm_spaces * pspaces, bool global)
gs_context_t *pctx = 0; /* = 0 is bogus to pacify compilers */
gs_scheduler_t *psched = 0;
gs_ref_memory_t *lmem = 0; /* = 0 is bogus to pacify compilers */
- chunk_locator_t loc;
+ clump_locator_t loc;
for (i = countof(pspaces->memories.indexed) - 1; psched == 0 && i > 0; --i) {
gs_ref_memory_t *mem = pspaces->memories.indexed[i];
@@ -273,7 +273,7 @@ context_reclaim(vm_spaces * pspaces, bool global)
loc.cp = 0;
for (i = 0; i < CTX_TABLE_SIZE; ++i)
for (pctx = psched->table[i]; pctx; pctx = pctx->table_next)
- pctx->visible = chunk_locate_ptr(pctx, &loc);
+ pctx->visible = clump_locate_ptr(pctx, &loc);
#ifdef DEBUG
if (!psched->current->visible) {
@@ -617,8 +617,8 @@ do_fork(i_ctx_t *i_ctx_p, os_ptr op, const ref * pstdin, const ref * pstdout,
return_error(gs_error_Fatal);
old_userdict = *puserdict;
userdict_size = dict_maxlength(&old_userdict);
- lmem = ialloc_alloc_state(parent, iimemory_local->chunk_size);
- lmem_stable = ialloc_alloc_state(parent, iimemory_local->chunk_size);
+ lmem = ialloc_alloc_state(parent, iimemory_local->clump_size);
+ lmem_stable = ialloc_alloc_state(parent, iimemory_local->clump_size);
if (lmem == 0 || lmem_stable == 0) {
gs_free_object(parent, lmem_stable, "do_fork");
gs_free_object(parent, lmem, "do_fork");