summary | refs | log | tree | commit | diff
path: root/include/gc_inline.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/gc_inline.h')
-rw-r--r--  include/gc_inline.h  130
1 file changed, 65 insertions, 65 deletions
diff --git a/include/gc_inline.h b/include/gc_inline.h
index cf2e9f67..c97a5127 100644
--- a/include/gc_inline.h
+++ b/include/gc_inline.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
* Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
@@ -12,18 +12,18 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-
-/* WARNING: */
-/* Note that for these routines, it is the clients responsibility to */
+
+/* WARNING: */
+/* Note that for these routines, it is the clients responsibility to */
/* add the extra byte at the end to deal with one-past-the-end pointers.*/
-/* In the standard collector configuration, the collector assumes that */
-/* such a byte has been added, and hence does not trace the last word */
-/* in the resulting object. */
-/* This is not an issue if the collector is compiled with */
-/* -DDONT_ADD_BYTE_AT_END, or if GC_all_interior_pointers is not set. */
-/* This interface is most useful for compilers that generate C. */
-/* It is also used internally for thread-local allocation. */
-/* Manual use is hereby discouraged. */
+/* In the standard collector configuration, the collector assumes that */
+/* such a byte has been added, and hence does not trace the last word */
+/* in the resulting object. */
+/* This is not an issue if the collector is compiled with */
+/* -DDONT_ADD_BYTE_AT_END, or if GC_all_interior_pointers is not set. */
+/* This interface is most useful for compilers that generate C. */
+/* It is also used internally for thread-local allocation. */
+/* Manual use is hereby discouraged. */
#include "gc.h"
#include "gc_tiny_fl.h"
@@ -35,58 +35,58 @@
# define GC_EXPECT(expr, outcome) (expr)
#endif /* __GNUC__ */
-/* The ultimately general inline allocation macro. Allocate an object */
+/* The ultimately general inline allocation macro. Allocate an object */
/* of size granules, putting the resulting pointer in result. Tiny_fl */
-/* is a "tiny" free list array, which will be used first, if the size */
-/* is appropriate. If granules is too large, we allocate with */
-/* default_expr instead. If we need to refill the free list, we use */
-/* GC_generic_malloc_many with the indicated kind. */
-/* Tiny_fl should be an array of GC_TINY_FREELISTS void * pointers. */
-/* If num_direct is nonzero, and the individual free list pointers */
-/* are initialized to (void *)1, then we allocate numdirect granules */
-/* directly using gmalloc before putting multiple objects into the */
-/* tiny_fl entry. If num_direct is zero, then the free lists may also */
-/* be initialized to (void *)0. */
-/* Note that we use the zeroth free list to hold objects 1 granule in */
-/* size that are used to satisfy size 0 allocation requests. */
-/* We rely on much of this hopefully getting optimized away in the */
-/* num_direct = 0 case. */
-/* Particularly if granules is constant, this should generate a small */
-/* amount of code. */
+/* is a "tiny" free list array, which will be used first, if the size */
+/* is appropriate. If granules is too large, we allocate with */
+/* default_expr instead. If we need to refill the free list, we use */
+/* GC_generic_malloc_many with the indicated kind. */
+/* Tiny_fl should be an array of GC_TINY_FREELISTS void * pointers. */
+/* If num_direct is nonzero, and the individual free list pointers */
+/* are initialized to (void *)1, then we allocate numdirect granules */
+/* directly using gmalloc before putting multiple objects into the */
+/* tiny_fl entry. If num_direct is zero, then the free lists may also */
+/* be initialized to (void *)0. */
+/* Note that we use the zeroth free list to hold objects 1 granule in */
+/* size that are used to satisfy size 0 allocation requests. */
+/* We rely on much of this hopefully getting optimized away in the */
+/* num_direct = 0 case. */
+/* Particularly if granules is constant, this should generate a small */
+/* amount of code. */
# define GC_FAST_MALLOC_GRANS(result,granules,tiny_fl,num_direct,\
- kind,default_expr,init) \
+ kind,default_expr,init) \
{ \
if (GC_EXPECT((granules) >= GC_TINY_FREELISTS,0)) { \
result = (default_expr); \
} else { \
- void **my_fl = (tiny_fl) + (granules); \
+ void **my_fl = (tiny_fl) + (granules); \
void *my_entry=*my_fl; \
- void *next; \
+ void *next; \
\
- while (GC_EXPECT((GC_word)my_entry \
- <= (num_direct) + GC_TINY_FREELISTS + 1, 0)) { \
- /* Entry contains counter or NULL */ \
- if ((GC_word)my_entry - 1 < (num_direct)) { \
- /* Small counter value, not NULL */ \
+ while (GC_EXPECT((GC_word)my_entry \
+ <= (num_direct) + GC_TINY_FREELISTS + 1, 0)) { \
+ /* Entry contains counter or NULL */ \
+ if ((GC_word)my_entry - 1 < (num_direct)) { \
+ /* Small counter value, not NULL */ \
*my_fl = (char *)my_entry + (granules) + 1; \
result = (default_expr); \
- goto out; \
+ goto out; \
} else { \
- /* Large counter or NULL */ \
+ /* Large counter or NULL */ \
GC_generic_malloc_many(((granules) == 0? GC_GRANULE_BYTES : \
- GC_RAW_BYTES_FROM_INDEX(granules)), \
- kind, my_fl); \
- my_entry = *my_fl; \
+ GC_RAW_BYTES_FROM_INDEX(granules)), \
+ kind, my_fl); \
+ my_entry = *my_fl; \
if (my_entry == 0) { \
- result = (*GC_get_oom_fn())((granules)*GC_GRANULE_BYTES); \
- goto out; \
- } \
- } \
+ result = (*GC_get_oom_fn())((granules)*GC_GRANULE_BYTES); \
+ goto out; \
+ } \
+ } \
} \
next = *(void **)(my_entry); \
result = (void *)my_entry; \
*my_fl = next; \
- init; \
+ init; \
PREFETCH_FOR_WRITE(next); \
GC_ASSERT(GC_size(result) >= (granules)*GC_GRANULE_BYTES); \
GC_ASSERT((kind) == PTRFREE || ((GC_word *)result)[1] == 0); \
@@ -95,38 +95,38 @@
}
# define GC_WORDS_TO_WHOLE_GRANULES(n) \
- GC_WORDS_TO_GRANULES((n) + GC_GRANULE_WORDS - 1)
+ GC_WORDS_TO_GRANULES((n) + GC_GRANULE_WORDS - 1)
-/* Allocate n words (NOT BYTES). X is made to point to the result. */
-/* This should really only be used if GC_all_interior_pointers is */
-/* not set, or DONT_ADD_BYTE_AT_END is set. See above. */
-/* The semantics changed in version 7.0; we no longer lock, and */
-/* the caller is responsible for supplying a cleared tiny_fl */
-/* free list array. For single-threaded applications, this may be */
-/* a global array. */
+/* Allocate n words (NOT BYTES). X is made to point to the result. */
+/* This should really only be used if GC_all_interior_pointers is */
+/* not set, or DONT_ADD_BYTE_AT_END is set. See above. */
+/* The semantics changed in version 7.0; we no longer lock, and */
+/* the caller is responsible for supplying a cleared tiny_fl */
+/* free list array. For single-threaded applications, this may be */
+/* a global array. */
# define GC_MALLOC_WORDS(result,n,tiny_fl) \
-{ \
+{ \
size_t grans = GC_WORDS_TO_WHOLE_GRANULES(n); \
GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, \
- NORMAL, GC_malloc(grans*GC_GRANULE_BYTES), \
- *(void **)result = 0); \
+ NORMAL, GC_malloc(grans*GC_GRANULE_BYTES), \
+ *(void **)result = 0); \
}
# define GC_MALLOC_ATOMIC_WORDS(result,n,tiny_fl) \
-{ \
+{ \
size_t grans = GC_WORDS_TO_WHOLE_GRANULES(n); \
GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, \
- PTRFREE, GC_malloc_atomic(grans*GC_GRANULE_BYTES), \
- (void)0 /* no initialization */); \
+ PTRFREE, GC_malloc_atomic(grans*GC_GRANULE_BYTES), \
+ (void)0 /* no initialization */); \
}
/* And once more for two word initialized objects: */
# define GC_CONS(result, first, second, tiny_fl) \
-{ \
+{ \
size_t grans = GC_WORDS_TO_WHOLE_GRANULES(2); \
GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, \
- NORMAL, GC_malloc(grans*GC_GRANULE_BYTES), \
- *(void **)result = (void *)(first)); \
- ((void **)(result))[1] = (void *)(second); \
+ NORMAL, GC_malloc(grans*GC_GRANULE_BYTES), \
+ *(void **)result = (void *)(first)); \
+ ((void **)(result))[1] = (void *)(second); \
}