summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author	Ivan Maidanski <ivmai@mail.ru>	2015-12-28 19:11:43 +0300
committer	Ivan Maidanski <ivmai@mail.ru>	2015-12-28 19:11:43 +0300
commit	7bf1d7d64fa8b06419d1a9ffef0fc2018dd249c9 (patch)
tree	818817c99170be5b17eb6f2d22d344b7dfeda47b
parent	d80a05927330b7148b3b13c9dc56b07024c4202d (diff)
download	bdwgc-7bf1d7d64fa8b06419d1a9ffef0fc2018dd249c9.tar.gz
Prefix PREFETCH_FOR_WRITE with GC_ as used in gc_inline.h public header
* doc/porting.html: Rename PREFETCH_FOR_WRITE to GC_PREFETCH_FOR_WRITE.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* include/gc_inline.h (GC_FAST_MALLOC_GRANS): Likewise.
* include/private/gcconfig.h: Likewise.
* new_hblk.c (GC_build_fl_clear4, GC_build_fl4, GC_build_fl): Likewise.
* include/gc_inline.h (GC_PREFETCH_FOR_WRITE): Define to no-op if undefined.
-rw-r--r--	doc/porting.html	2
-rw-r--r--	fnlz_mlc.c	2
-rw-r--r--	include/gc_inline.h	6
-rw-r--r--	include/private/gcconfig.h	18
-rw-r--r--	new_hblk.c	12
5 files changed, 22 insertions, 18 deletions
diff --git a/doc/porting.html b/doc/porting.html
index f22f654e..751cbc59 100644
--- a/doc/porting.html
+++ b/doc/porting.html
@@ -181,7 +181,7 @@ allows incremental/generational garbage collection.
<TT>MPROTECT_VDB</tt> identifies modified pages by
write protecting the heap and catching faults.
<TT>PROC_VDB</tt> uses the /proc primitives to read dirty bits.
-<DT><TT>PREFETCH, PREFETCH_FOR_WRITE</tt>
+<DT><TT>PREFETCH, GC_PREFETCH_FOR_WRITE</tt>
<DD>
The collector uses <TT>PREFETCH</tt>(<I>x</i>) to preload the cache
with *<I>x</i>.
diff --git a/fnlz_mlc.c b/fnlz_mlc.c
index 4648ca96..1b168874 100644
--- a/fnlz_mlc.c
+++ b/fnlz_mlc.c
@@ -162,7 +162,7 @@ GC_API void GC_CALL GC_register_disclaim_proc(int kind, GC_disclaim_proc proc,
*my_fl = next;
obj_link(result) = 0;
*(word *)result = (word)fclos | 1;
- PREFETCH_FOR_WRITE(next);
+ GC_PREFETCH_FOR_WRITE(next);
return (word *)result + 1;
}
#endif /* THREAD_LOCAL_ALLOC */
diff --git a/include/gc_inline.h b/include/gc_inline.h
index 01d4b058..5c49e0e5 100644
--- a/include/gc_inline.h
+++ b/include/gc_inline.h
@@ -42,6 +42,10 @@
# define GC_ASSERT(expr) /* empty */
#endif
+#ifndef GC_PREFETCH_FOR_WRITE
+# define GC_PREFETCH_FOR_WRITE(x) (void)0
+#endif
+
/* Store a pointer to a list of newly allocated objects of kind k and */
/* size lb in *result. The caller must make sure that *result is */
/* traced even if objects are ptrfree. */
@@ -83,7 +87,7 @@ GC_API void GC_CALL GC_generic_malloc_many(size_t /* lb */, int /* k */,
result = (void *)my_entry; \
*my_fl = next; \
init; \
- PREFETCH_FOR_WRITE(next); \
+ GC_PREFETCH_FOR_WRITE(next); \
GC_ASSERT(GC_size(result) >= (granules)*GC_GRANULE_BYTES); \
GC_ASSERT((kind) == PTRFREE || ((GC_word *)result)[1] == 0); \
break; \
diff --git a/include/private/gcconfig.h b/include/private/gcconfig.h
index 80fb448c..ca42efa8 100644
--- a/include/private/gcconfig.h
+++ b/include/private/gcconfig.h
@@ -745,7 +745,7 @@
* An architecture may define PREFETCH(x) to preload the cache with *x.
* This defaults to GCC built-in operation (or a no-op for other compilers).
*
- * PREFETCH_FOR_WRITE(x) is used if *x is about to be written.
+ * GC_PREFETCH_FOR_WRITE(x) is used if *x is about to be written.
*
* An architecture may also define CLEAR_DOUBLE(x) to be a fast way to
* clear the two words at GC_malloc-aligned address x. By default,
@@ -939,7 +939,7 @@
/* The performance impact of prefetches is untested */
# define PREFETCH(x) \
__asm__ __volatile__ ("dcbt 0,%0" : : "r" ((const void *) (x)))
-# define PREFETCH_FOR_WRITE(x) \
+# define GC_PREFETCH_FOR_WRITE(x) \
__asm__ __volatile__ ("dcbtst 0,%0" : : "r" ((const void *) (x)))
# endif
/* There seems to be some issues with trylock hanging on darwin. */
@@ -1391,7 +1391,7 @@
# ifdef FORCE_WRITE_PREFETCH
/* Using prefetches for write seems to have a slight negative */
/* impact on performance, at least for a PIII/500. */
-# define PREFETCH_FOR_WRITE(x) \
+# define GC_PREFETCH_FOR_WRITE(x) \
__asm__ __volatile__ ("prefetcht0 %0" : : "m"(*(char *)(x)))
# else
# define NO_PREFETCH_FOR_WRITE
@@ -1399,7 +1399,7 @@
# elif defined(USE_3DNOW_PREFETCH)
# define PREFETCH(x) \
__asm__ __volatile__ ("prefetch %0" : : "m"(*(char *)(x)))
-# define PREFETCH_FOR_WRITE(x) \
+# define GC_PREFETCH_FOR_WRITE(x) \
__asm__ __volatile__ ("prefetchw %0" : : "m"(*(char *)(x)))
# endif
# if defined(__GLIBC__)
@@ -1969,14 +1969,14 @@
# ifndef __INTEL_COMPILER
# define PREFETCH(x) \
__asm__ (" lfetch [%0]": : "r"(x))
-# define PREFETCH_FOR_WRITE(x) \
+# define GC_PREFETCH_FOR_WRITE(x) \
__asm__ (" lfetch.excl [%0]": : "r"(x))
# define CLEAR_DOUBLE(x) \
__asm__ (" stf.spill [%0]=f0": : "r"((void *)(x)))
# else
# include <ia64intrin.h>
# define PREFETCH(x) __lfetch(__lfhint_none, (x))
-# define PREFETCH_FOR_WRITE(x) __lfetch(__lfhint_nta, (x))
+# define GC_PREFETCH_FOR_WRITE(x) __lfetch(__lfhint_nta, (x))
# define CLEAR_DOUBLE(x) __stf_spill((void *)(x), 0)
# endif /* __INTEL_COMPILER */
# endif
@@ -2815,11 +2815,11 @@
# endif
#endif
-#ifndef PREFETCH_FOR_WRITE
+#ifndef GC_PREFETCH_FOR_WRITE
# if defined(__GNUC__) && __GNUC__ >= 3 && !defined(NO_PREFETCH_FOR_WRITE)
-# define PREFETCH_FOR_WRITE(x) __builtin_prefetch((x), 1)
+# define GC_PREFETCH_FOR_WRITE(x) __builtin_prefetch((x), 1)
# else
-# define PREFETCH_FOR_WRITE(x) (void)0
+# define GC_PREFETCH_FOR_WRITE(x) (void)0
# endif
#endif
diff --git a/new_hblk.c b/new_hblk.c
index 05c4abff..ac6a118d 100644
--- a/new_hblk.c
+++ b/new_hblk.c
@@ -58,7 +58,7 @@
p[3] = 0;
p += 4;
for (; (word)p < (word)lim; p += 4) {
- PREFETCH_FOR_WRITE((ptr_t)(p+64));
+ GC_PREFETCH_FOR_WRITE((ptr_t)(p + 64));
p[0] = (word)(p-4);
p[1] = 0;
CLEAR_DOUBLE(p+2);
@@ -92,7 +92,7 @@
p[4] = (word)p;
p += 8;
for (; (word)p < (word)lim; p += 8) {
- PREFETCH_FOR_WRITE((ptr_t)(p+64));
+ GC_PREFETCH_FOR_WRITE((ptr_t)(p + 64));
p[0] = (word)(p-4);
p[4] = (word)p;
};
@@ -116,10 +116,10 @@ GC_INNER ptr_t GC_build_fl(struct hblk *h, size_t sz, GC_bool clear,
/* If we were more serious about it, these should go inside */
/* the loops. But write prefetches usually don't seem to */
/* matter much. */
- PREFETCH_FOR_WRITE((ptr_t)h);
- PREFETCH_FOR_WRITE((ptr_t)h + 128);
- PREFETCH_FOR_WRITE((ptr_t)h + 256);
- PREFETCH_FOR_WRITE((ptr_t)h + 378);
+ GC_PREFETCH_FOR_WRITE((ptr_t)h);
+ GC_PREFETCH_FOR_WRITE((ptr_t)h + 128);
+ GC_PREFETCH_FOR_WRITE((ptr_t)h + 256);
+ GC_PREFETCH_FOR_WRITE((ptr_t)h + 378);
# ifndef SMALL_CONFIG
/* Handle small objects sizes more efficiently. For larger objects */
/* the difference is less significant. */