Diffstat (limited to 'src/atomic_ops/sysdeps/gcc')
-rw-r--r--  src/atomic_ops/sysdeps/gcc/arm.h      |  4
-rw-r--r--  src/atomic_ops/sysdeps/gcc/avr32.h    | 12
-rw-r--r--  src/atomic_ops/sysdeps/gcc/hexagon.h  |  2
-rw-r--r--  src/atomic_ops/sysdeps/gcc/ia64.h     |  4
-rw-r--r--  src/atomic_ops/sysdeps/gcc/powerpc.h  |  4
-rw-r--r--  src/atomic_ops/sysdeps/gcc/x86.h      |  4
-rw-r--r--  src/atomic_ops/sysdeps/gcc/x86_64.h   |  4
7 files changed, 29 insertions(+), 5 deletions(-)
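
The common thread across all seven ports below: the hand-written assembly
primitives get wrapped in #ifndef AO_PREFER_GENERALIZED, so a client that
defines that macro has them synthesized from the port's compare-and-swap
instead. A minimal sketch of the client side, assuming a hypothetical shared
counter; the macro and the AO_* calls are the library's real API:

/* Define before including the header (or pass -DAO_PREFER_GENERALIZED). */
#define AO_PREFER_GENERALIZED
#include "atomic_ops.h"

static volatile AO_t counter;   /* hypothetical shared counter */

void bump(void)
{
  /* With the direct asm versions guarded out, this is expected to
   * resolve to a generalized CAS-based implementation. */
  AO_fetch_and_add_full(&counter, 1);
}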
diff --git a/src/atomic_ops/sysdeps/gcc/arm.h b/src/atomic_ops/sysdeps/gcc/arm.h
index 4f14271..4496d9a 100644
--- a/src/atomic_ops/sysdeps/gcc/arm.h
+++ b/src/atomic_ops/sysdeps/gcc/arm.h
@@ -145,6 +145,7 @@ AO_INLINE void AO_store(volatile AO_t *addr, AO_t value)
interrupt latencies. LDREX, STREX are more flexible, other instructions
can be done between the LDREX and STREX accesses."
*/
+#ifndef AO_PREFER_GENERALIZED
#if !defined(AO_FORCE_USE_SWP) || defined(__thumb2__)
/* But, on the other hand, there could be a considerable performance */
/* degradation in case of a race. Eg., test_atomic.c executing */
@@ -238,6 +239,7 @@ AO_fetch_and_sub1(volatile AO_t *p)
return result;
}
#define AO_HAVE_fetch_and_sub1
+#endif /* !AO_PREFER_GENERALIZED */
/* NEC LE-IT: compare and swap */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
@@ -351,6 +353,8 @@ AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
#endif /* __ARM_ARCH_x */
#if !defined(AO_HAVE_test_and_set_full) && !defined(AO_HAVE_test_and_set) \
+ && (!defined(AO_PREFER_GENERALIZED) \
+ || !defined(AO_HAVE_fetch_compare_and_swap)) \
&& !defined(__ARM_ARCH_2__) && !defined(__ARM_ARCH_6M__)
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
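
Note the extra condition on the test_and_set guard above: even under
AO_PREFER_GENERALIZED, the direct version is kept when there is no
fetch_compare_and_swap to generalize from. For the fetch_and_add1 and
fetch_and_sub1 pair that the new #ifndef compiles out, the generalized
replacement is, roughly, a CAS retry loop. A hedged sketch, not the
library's exact code; the helper name is illustrative:

AO_INLINE AO_t
generalized_fetch_and_add1(volatile AO_t *addr)
{
  AO_t old;
  do {
    old = *addr;                /* read the current value */
  } while (AO_fetch_compare_and_swap(addr, old, old + 1) != old);
  return old;                   /* value before the increment */
}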
diff --git a/src/atomic_ops/sysdeps/gcc/avr32.h b/src/atomic_ops/sysdeps/gcc/avr32.h
index c1bf56c..c5a12a3 100644
--- a/src/atomic_ops/sysdeps/gcc/avr32.h
+++ b/src/atomic_ops/sysdeps/gcc/avr32.h
@@ -28,9 +28,10 @@
#include "../test_and_set_t_is_ao_t.h"
-AO_INLINE AO_TS_VAL_t
-AO_test_and_set_full(volatile AO_TS_t *addr)
-{
+#ifndef AO_PREFER_GENERALIZED
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_full(volatile AO_TS_t *addr)
+ {
register long ret;
__asm__ __volatile__(
@@ -40,8 +41,9 @@ AO_test_and_set_full(volatile AO_TS_t *addr)
: "memory");
return (AO_TS_VAL_t)ret;
-}
-#define AO_HAVE_test_and_set_full
+ }
+# define AO_HAVE_test_and_set_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
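
With the AVR32 stcond-based test_and_set guarded out, the operation can be
synthesized from the boolean AO_compare_and_swap_full left unguarded here;
since this port includes test_and_set_t_is_ao_t.h, AO_TS_t is just AO_t.
A hedged sketch with an illustrative helper name:

AO_INLINE AO_TS_VAL_t
generalized_test_and_set_full(volatile AO_TS_t *addr)
{
  if (AO_compare_and_swap_full((volatile AO_t *)addr,
                               AO_TS_CLEAR, AO_TS_SET))
    return AO_TS_CLEAR;         /* the location was clear; we set it */
  return AO_TS_SET;             /* already set by another thread */
}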
diff --git a/src/atomic_ops/sysdeps/gcc/hexagon.h b/src/atomic_ops/sysdeps/gcc/hexagon.h
index 38d73f9..da7eb4e 100644
--- a/src/atomic_ops/sysdeps/gcc/hexagon.h
+++ b/src/atomic_ops/sysdeps/gcc/hexagon.h
@@ -27,6 +27,7 @@ AO_nop_full(void)
/* The Hexagon has load-locked, store-conditional primitives, and so */
/* resulting code is very nearly identical to that of PowerPC. */
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
{
@@ -67,6 +68,7 @@ AO_test_and_set(volatile AO_TS_t *addr)
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set
+#endif /* !AO_PREFER_GENERALIZED */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
diff --git a/src/atomic_ops/sysdeps/gcc/ia64.h b/src/atomic_ops/sysdeps/gcc/ia64.h
index 97e325e..88f4c90 100644
--- a/src/atomic_ops/sysdeps/gcc/ia64.h
+++ b/src/atomic_ops/sysdeps/gcc/ia64.h
@@ -61,6 +61,7 @@ AO_nop_full(void)
}
#define AO_HAVE_nop_full
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add1_acquire (volatile AO_t *addr)
{
@@ -108,6 +109,7 @@ AO_fetch_and_sub1_release (volatile AO_t *addr)
return result;
}
#define AO_HAVE_fetch_and_sub1_release
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val)
@@ -202,6 +204,7 @@ AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr,
/* FIXME: Add compare_double_and_swap_double for the _ILP32 case. */
#else
+# ifndef AO_PREFER_GENERALIZED
AO_INLINE unsigned int
AO_int_fetch_and_add1_acquire(volatile unsigned int *addr)
{
@@ -245,6 +248,7 @@ AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr,
return result;
}
# define AO_HAVE_int_fetch_and_sub1_release
+# endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_acquire(volatile unsigned int *addr,
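
On IA-64 the guarded primitives come in explicit acquire/release flavors
(the fetchadd instruction encodes the ordering). Generalization can preserve
that by looping over the correspondingly ordered CAS; a hedged int-width
sketch built on the AO_int_fetch_compare_and_swap_acquire shown above, with
an illustrative helper name:

AO_INLINE unsigned int
generalized_int_fetch_and_add1_acquire(volatile unsigned int *addr)
{
  unsigned int old;
  do {
    old = *addr;
  } while (AO_int_fetch_compare_and_swap_acquire(addr, old, old + 1) != old);
  return old;
}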
diff --git a/src/atomic_ops/sysdeps/gcc/powerpc.h b/src/atomic_ops/sysdeps/gcc/powerpc.h
index e508724..275a277 100644
--- a/src/atomic_ops/sysdeps/gcc/powerpc.h
+++ b/src/atomic_ops/sysdeps/gcc/powerpc.h
@@ -102,6 +102,7 @@ AO_store_release(volatile AO_t *addr, AO_t value)
}
#define AO_HAVE_store_release
+#ifndef AO_PREFER_GENERALIZED
/* This is similar to the code in the garbage collector. Deleting */
/* this and having it synthesized from compare_and_swap would probably */
/* only cost us a load immediate instruction. */
@@ -165,6 +166,7 @@ AO_test_and_set_full(volatile AO_TS_t *addr) {
return result;
}
#define AO_HAVE_test_and_set_full
+#endif /* !AO_PREFER_GENERALIZED */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
@@ -294,6 +296,7 @@ AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
}
#define AO_HAVE_fetch_compare_and_swap_full
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
AO_t oldval;
@@ -345,6 +348,7 @@ AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr) {
return result;
}
#define AO_HAVE_fetch_and_add_full
+#endif /* !AO_PREFER_GENERALIZED */
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
/* Empty */
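
The comment in this file already anticipates the change: synthesizing the
operation from compare_and_swap "would probably only cost us a load
immediate instruction". The selection itself runs through the AO_HAVE_*
feature macros that each #define above feeds. A hedged, simplified sketch of
the shape of that machinery (the real logic lives in the library's
generalization headers):

#if !defined(AO_HAVE_fetch_and_add_full) \
    && defined(AO_HAVE_fetch_compare_and_swap_full)
  AO_INLINE AO_t
  AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr)
  {
    AO_t old;
    do {
      old = *addr;
    } while (AO_fetch_compare_and_swap_full(addr, old, old + incr) != old);
    return old;
  }
# define AO_HAVE_fetch_and_add_full
#endif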
diff --git a/src/atomic_ops/sysdeps/gcc/x86.h b/src/atomic_ops/sysdeps/gcc/x86.h
index 11ac54b..0c7d703 100644
--- a/src/atomic_ops/sysdeps/gcc/x86.h
+++ b/src/atomic_ops/sysdeps/gcc/x86.h
@@ -56,6 +56,7 @@ AO_nop_full(void)
/* currently needed or useful for cached memory accesses. */
/* Really only works for 486 and later */
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
@@ -67,6 +68,7 @@ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
return result;
}
#define AO_HAVE_fetch_and_add_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
@@ -92,6 +94,7 @@ AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
}
#define AO_HAVE_short_fetch_and_add_full
+#ifndef AO_PREFER_GENERALIZED
/* Really only works for 486 and later */
AO_INLINE void
AO_and_full (volatile AO_t *p, AO_t value)
@@ -116,6 +119,7 @@ AO_xor_full (volatile AO_t *p, AO_t value)
"=m" (*p) : "r" (value), "m" (*p) : "memory");
}
#define AO_HAVE_xor_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
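
Unlike fetch_and_add, the x86 test_and_set below stays unguarded, presumably
because the xchg-based version is already minimal and gains nothing from
going through CAS. The guarded and/or/xor group, by contrast, generalizes
naturally; a hedged sketch for the AND case, helper name illustrative:

AO_INLINE void
generalized_and_full(volatile AO_t *p, AO_t value)
{
  AO_t old;
  do {
    old = *p;
  } while (AO_fetch_compare_and_swap_full(p, old, old & value) != old);
}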
diff --git a/src/atomic_ops/sysdeps/gcc/x86_64.h b/src/atomic_ops/sysdeps/gcc/x86_64.h
index 062eeb5..cf46d1e 100644
--- a/src/atomic_ops/sysdeps/gcc/x86_64.h
+++ b/src/atomic_ops/sysdeps/gcc/x86_64.h
@@ -42,6 +42,7 @@ AO_nop_full(void)
/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
@@ -53,6 +54,7 @@ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
return result;
}
#define AO_HAVE_fetch_and_add_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
@@ -90,6 +92,7 @@ AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
}
#define AO_HAVE_int_fetch_and_add_full
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE void
AO_and_full (volatile AO_t *p, AO_t value)
{
@@ -113,6 +116,7 @@ AO_xor_full (volatile AO_t *p, AO_t value)
"=m" (*p) : "r" (value), "m" (*p) : "memory");
}
#define AO_HAVE_xor_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
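
Whichever way each primitive ends up provided, direct or generalized,
clients can portably probe the outcome through the AO_HAVE_* feature macros.
A small hypothetical harness:

#include <stdio.h>
#include "atomic_ops.h"

int main(void)
{
#ifdef AO_HAVE_fetch_and_add_full
  printf("AO_fetch_and_add_full: available (direct or generalized)\n");
#endif
#ifdef AO_HAVE_test_and_set_full
  printf("AO_test_and_set_full: available\n");
#endif
  return 0;
}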