author    Ivan Maidanski <ivmai@mail.ru>    2021-01-14 10:00:17 +0300
committer Ivan Maidanski <ivmai@mail.ru>    2021-09-09 08:33:28 +0300
commit    bf7f4c00dab2e2fa270a18c9fe5996bc7aadcd32 (patch)
tree      f1850a91d0dd2441fa94d7e95a60011d2bd72d29
parent    593ed4dd8855ea8c9c31bd99712a2818337d1b7a (diff)
download  libatomic_ops-bf7f4c00dab2e2fa270a18c9fe5996bc7aadcd32.tar.gz
Use __alignof__ instead of sizeof in atomic variable alignment assertions
* src/atomic_ops.h [(AO_GNUC_PREREQ(4,3)||__STDC_VERSION__>=201112L) && !LINT2] (AO_ALIGNOF_SUPPORTED): Define (means __alignof__ is supported).
* src/atomic_ops.h (AO_ASSERT_ADDR_ALIGNED): New internal macro (depending on AO_ALIGNOF_SUPPORTED).
* src/atomic_ops.h: Update copyright year.
* src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h [AO_ALIGNOF_SUPPORTED] (AO_ACCESS_CHECK_ALIGNED): Define (regardless of __m68k__).
* src/atomic_ops/sysdeps/loadstore/atomic_load.template (AO_XSIZE_load): Replace assert((addr&(sizeof(*addr)-1))==0) with AO_ASSERT_ADDR_ALIGNED(addr).
* src/atomic_ops/sysdeps/loadstore/atomic_store.template (AO_XSIZE_store): Likewise.
* src/atomic_ops/sysdeps/loadstore/double_atomic_load_store.h (AO_double_load, AO_double_store): Likewise.
* src/atomic_ops/sysdeps/msftc/x86.h [AO_ASSUME_VISTA] (AO_double_compare_and_swap_full): Likewise.
* src/atomic_ops/sysdeps/msftc/x86_64.h [AO_CMPXCHG16B_AVAILABLE && _MSC_VER>=1500] (AO_compare_double_and_swap_double_full): Likewise.
* src/atomic_ops/sysdeps/loadstore/atomic_load.h: Regenerate.
* src/atomic_ops/sysdeps/loadstore/atomic_store.h: Likewise.
* src/atomic_ops/sysdeps/loadstore/char_atomic_load.h: Likewise.
* src/atomic_ops/sysdeps/loadstore/char_atomic_store.h: Likewise.
* src/atomic_ops/sysdeps/loadstore/int_atomic_load.h: Likewise.
* src/atomic_ops/sysdeps/loadstore/int_atomic_store.h: Likewise.
* src/atomic_ops/sysdeps/loadstore/short_atomic_load.h: Likewise.
* src/atomic_ops/sysdeps/loadstore/short_atomic_store.h: Likewise.
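Rationale: the required alignment of a type can be smaller than its size (the m68k comment touched below notes variables with 2-byte alignment whose sizeof is 4), so a sizeof-based mask can reject addresses that are in fact correctly aligned for the target ABI. The following is a minimal standalone sketch, assuming a GCC-compatible compiler; the two macros mirror the old and new expansions from this patch, while the long long example and main() are purely illustrative and not part of the library:

    /* sketch: compare the old sizeof-based mask with the new one. */
    #include <stdio.h>
    #include <stddef.h>

    /* New expansion (used when __alignof__ is available). */
    #define ASSERT_MASK_ALIGNOF(addr) ((size_t)__alignof__(*(addr)) - 1)
    /* Old expansion (pre-patch, based on sizeof). */
    #define ASSERT_MASK_SIZEOF(addr)  (sizeof(*(addr)) - 1)

    int main(void)
    {
      long long v = 0;

      /* On 32-bit x86, for instance, sizeof(long long) is 8 while the */
      /* ABI only guarantees 4-byte alignment, so the old mask could   */
      /* trip the assertion on a perfectly valid address.              */
      printf("sizeof      = %zu, old mask = 0x%zx\n",
             sizeof(v), ASSERT_MASK_SIZEOF(&v));
      printf("__alignof__ = %zu, new mask = 0x%zx\n",
             (size_t)__alignof__(v), ASSERT_MASK_ALIGNOF(&v));
      return 0;
    }

On targets where the two values coincide the behavior of the assertion is unchanged; the patch only relaxes it where the ABI alignment is genuinely smaller than the type size.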
-rw-r--r--  src/atomic_ops.h | 14
-rw-r--r--  src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h | 2
-rw-r--r--  src/atomic_ops/sysdeps/loadstore/atomic_load.h | 2
-rw-r--r--  src/atomic_ops/sysdeps/loadstore/atomic_load.template | 2
-rw-r--r--  src/atomic_ops/sysdeps/loadstore/atomic_store.h | 2
-rw-r--r--  src/atomic_ops/sysdeps/loadstore/atomic_store.template | 2
-rw-r--r--  src/atomic_ops/sysdeps/loadstore/char_atomic_load.h | 2
-rw-r--r--  src/atomic_ops/sysdeps/loadstore/char_atomic_store.h | 2
-rw-r--r--  src/atomic_ops/sysdeps/loadstore/double_atomic_load_store.h | 4
-rw-r--r--  src/atomic_ops/sysdeps/loadstore/int_atomic_load.h | 2
-rw-r--r--  src/atomic_ops/sysdeps/loadstore/int_atomic_store.h | 2
-rw-r--r--  src/atomic_ops/sysdeps/loadstore/short_atomic_load.h | 2
-rw-r--r--  src/atomic_ops/sysdeps/loadstore/short_atomic_store.h | 2
-rw-r--r--  src/atomic_ops/sysdeps/msftc/x86.h | 2
-rw-r--r--  src/atomic_ops/sysdeps/msftc/x86_64.h | 2
15 files changed, 28 insertions, 16 deletions
diff --git a/src/atomic_ops.h b/src/atomic_ops.h
index 1373a2d..bd8cf85 100644
--- a/src/atomic_ops.h
+++ b/src/atomic_ops.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2008-2018 Ivan Maidanski
+ * Copyright (c) 2008-2021 Ivan Maidanski
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -231,6 +231,18 @@
# endif
#endif /* !AO_ATTR_NO_SANITIZE_THREAD */
+#if (AO_GNUC_PREREQ(4, 3) || __STDC_VERSION__ >= 201112L) && !defined(LINT2)
+# define AO_ALIGNOF_SUPPORTED 1
+#endif
+
+#ifdef AO_ALIGNOF_SUPPORTED
+# define AO_ASSERT_ADDR_ALIGNED(addr) \
+ assert(((size_t)(addr) & (__alignof__(*(addr)) - 1)) == 0)
+#else
+# define AO_ASSERT_ADDR_ALIGNED(addr) \
+ assert(((size_t)(addr) & (sizeof(*(addr)) - 1)) == 0)
+#endif /* !AO_ALIGNOF_SUPPORTED */
+
#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
# define AO_compiler_barrier() __asm__ __volatile__("" : : : "memory")
#elif defined(_MSC_VER) || defined(__DMC__) || defined(__BORLANDC__) \
diff --git a/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h b/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h
index 97f31e1..7bcaa99 100644
--- a/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h
@@ -24,7 +24,7 @@
/* short, and unsigned int loads and stores are atomic but only if data */
/* is suitably aligned. */
-#if defined(__m68k__)
+#if defined(__m68k__) && !defined(AO_ALIGNOF_SUPPORTED)
/* Even though AO_t is redefined in m68k.h, some clients use AO */
/* pointer size primitives to access variables not declared as AO_t. */
/* Such variables may have 2-byte alignment, while their sizeof is 4. */
diff --git a/src/atomic_ops/sysdeps/loadstore/atomic_load.h b/src/atomic_ops/sysdeps/loadstore/atomic_load.h
index 8a08b40..38c23e4 100644
--- a/src/atomic_ops/sysdeps/loadstore/atomic_load.h
+++ b/src/atomic_ops/sysdeps/loadstore/atomic_load.h
@@ -28,7 +28,7 @@ AO_INLINE AO_t
AO_load(const volatile AO_t *addr)
{
# ifdef AO_ACCESS_CHECK_ALIGNED
- assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
# endif
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier (fence) semantics. */
diff --git a/src/atomic_ops/sysdeps/loadstore/atomic_load.template b/src/atomic_ops/sysdeps/loadstore/atomic_load.template
index 02dadf5..26b7e4e 100644
--- a/src/atomic_ops/sysdeps/loadstore/atomic_load.template
+++ b/src/atomic_ops/sysdeps/loadstore/atomic_load.template
@@ -28,7 +28,7 @@ AO_INLINE XCTYPE
AO_XSIZE_load(const volatile XCTYPE *addr)
{
# ifdef AO_ACCESS_XSIZE_CHECK_ALIGNED
- assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
# endif
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier (fence) semantics. */
diff --git a/src/atomic_ops/sysdeps/loadstore/atomic_store.h b/src/atomic_ops/sysdeps/loadstore/atomic_store.h
index 23be715..9d5cf55 100644
--- a/src/atomic_ops/sysdeps/loadstore/atomic_store.h
+++ b/src/atomic_ops/sysdeps/loadstore/atomic_store.h
@@ -28,7 +28,7 @@ AO_INLINE void
AO_store(volatile AO_t *addr, AO_t new_val)
{
# ifdef AO_ACCESS_CHECK_ALIGNED
- assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
# endif
*(AO_t *)addr = new_val;
}
diff --git a/src/atomic_ops/sysdeps/loadstore/atomic_store.template b/src/atomic_ops/sysdeps/loadstore/atomic_store.template
index d111f2b..56bba45 100644
--- a/src/atomic_ops/sysdeps/loadstore/atomic_store.template
+++ b/src/atomic_ops/sysdeps/loadstore/atomic_store.template
@@ -28,7 +28,7 @@ AO_INLINE void
AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE new_val)
{
# ifdef AO_ACCESS_XSIZE_CHECK_ALIGNED
- assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
# endif
*(XCTYPE *)addr = new_val;
}
diff --git a/src/atomic_ops/sysdeps/loadstore/char_atomic_load.h b/src/atomic_ops/sysdeps/loadstore/char_atomic_load.h
index ef60a17..8927b7d 100644
--- a/src/atomic_ops/sysdeps/loadstore/char_atomic_load.h
+++ b/src/atomic_ops/sysdeps/loadstore/char_atomic_load.h
@@ -28,7 +28,7 @@ AO_INLINE unsigned/**/char
AO_char_load(const volatile unsigned/**/char *addr)
{
# ifdef AO_ACCESS_char_CHECK_ALIGNED
- assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
# endif
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier (fence) semantics. */
diff --git a/src/atomic_ops/sysdeps/loadstore/char_atomic_store.h b/src/atomic_ops/sysdeps/loadstore/char_atomic_store.h
index 4fd5b21..f1fa895 100644
--- a/src/atomic_ops/sysdeps/loadstore/char_atomic_store.h
+++ b/src/atomic_ops/sysdeps/loadstore/char_atomic_store.h
@@ -28,7 +28,7 @@ AO_INLINE void
AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char new_val)
{
# ifdef AO_ACCESS_char_CHECK_ALIGNED
- assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
# endif
*(unsigned/**/char *)addr = new_val;
}
diff --git a/src/atomic_ops/sysdeps/loadstore/double_atomic_load_store.h b/src/atomic_ops/sysdeps/loadstore/double_atomic_load_store.h
index 145a14c..85debd0 100644
--- a/src/atomic_ops/sysdeps/loadstore/double_atomic_load_store.h
+++ b/src/atomic_ops/sysdeps/loadstore/double_atomic_load_store.h
@@ -31,7 +31,7 @@ AO_double_load(const volatile AO_double_t *addr)
AO_double_t result;
# ifdef AO_ACCESS_double_CHECK_ALIGNED
- assert(((size_t)addr & (sizeof(AO_double_t) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
# endif
/* Cast away the volatile in case it adds fence semantics. */
result.AO_whole = ((const AO_double_t *)addr)->AO_whole;
@@ -43,7 +43,7 @@ AO_INLINE void
AO_double_store(volatile AO_double_t *addr, AO_double_t new_val)
{
# ifdef AO_ACCESS_double_CHECK_ALIGNED
- assert(((size_t)addr & (sizeof(AO_double_t) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
# endif
((AO_double_t *)addr)->AO_whole = new_val.AO_whole;
}
diff --git a/src/atomic_ops/sysdeps/loadstore/int_atomic_load.h b/src/atomic_ops/sysdeps/loadstore/int_atomic_load.h
index 6e9b49d..ecc4b3a 100644
--- a/src/atomic_ops/sysdeps/loadstore/int_atomic_load.h
+++ b/src/atomic_ops/sysdeps/loadstore/int_atomic_load.h
@@ -28,7 +28,7 @@ AO_INLINE unsigned
AO_int_load(const volatile unsigned *addr)
{
# ifdef AO_ACCESS_int_CHECK_ALIGNED
- assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
# endif
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier (fence) semantics. */
diff --git a/src/atomic_ops/sysdeps/loadstore/int_atomic_store.h b/src/atomic_ops/sysdeps/loadstore/int_atomic_store.h
index 04adac5..3c32b30 100644
--- a/src/atomic_ops/sysdeps/loadstore/int_atomic_store.h
+++ b/src/atomic_ops/sysdeps/loadstore/int_atomic_store.h
@@ -28,7 +28,7 @@ AO_INLINE void
AO_int_store(volatile unsigned *addr, unsigned new_val)
{
# ifdef AO_ACCESS_int_CHECK_ALIGNED
- assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
# endif
*(unsigned *)addr = new_val;
}
diff --git a/src/atomic_ops/sysdeps/loadstore/short_atomic_load.h b/src/atomic_ops/sysdeps/loadstore/short_atomic_load.h
index e351d99..2370540 100644
--- a/src/atomic_ops/sysdeps/loadstore/short_atomic_load.h
+++ b/src/atomic_ops/sysdeps/loadstore/short_atomic_load.h
@@ -28,7 +28,7 @@ AO_INLINE unsigned/**/short
AO_short_load(const volatile unsigned/**/short *addr)
{
# ifdef AO_ACCESS_short_CHECK_ALIGNED
- assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
# endif
/* Cast away the volatile for architectures like IA64 where */
/* volatile adds barrier (fence) semantics. */
diff --git a/src/atomic_ops/sysdeps/loadstore/short_atomic_store.h b/src/atomic_ops/sysdeps/loadstore/short_atomic_store.h
index ccfc691..b9a9dc6 100644
--- a/src/atomic_ops/sysdeps/loadstore/short_atomic_store.h
+++ b/src/atomic_ops/sysdeps/loadstore/short_atomic_store.h
@@ -28,7 +28,7 @@ AO_INLINE void
AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short new_val)
{
# ifdef AO_ACCESS_short_CHECK_ALIGNED
- assert(((size_t)addr & (sizeof(*addr) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
# endif
*(unsigned/**/short *)addr = new_val;
}
diff --git a/src/atomic_ops/sysdeps/msftc/x86.h b/src/atomic_ops/sysdeps/msftc/x86.h
index 3de9616..d290fc7 100644
--- a/src/atomic_ops/sysdeps/msftc/x86.h
+++ b/src/atomic_ops/sysdeps/msftc/x86.h
@@ -125,7 +125,7 @@ AO_test_and_set_full(volatile AO_TS_t *addr)
AO_double_compare_and_swap_full(volatile AO_double_t *addr,
AO_double_t old_val, AO_double_t new_val)
{
- assert(((size_t)addr & (sizeof(AO_double_t) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
return (double_ptr_storage)_InterlockedCompareExchange64(
(__int64 volatile *)addr,
new_val.AO_whole /* exchange */,
diff --git a/src/atomic_ops/sysdeps/msftc/x86_64.h b/src/atomic_ops/sysdeps/msftc/x86_64.h
index fead273..196fc65 100644
--- a/src/atomic_ops/sysdeps/msftc/x86_64.h
+++ b/src/atomic_ops/sysdeps/msftc/x86_64.h
@@ -278,7 +278,7 @@ AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
{
__int64 comparandResult[2];
- assert(((size_t)addr & (sizeof(AO_double_t) - 1)) == 0);
+ AO_ASSERT_ADDR_ALIGNED(addr);
comparandResult[0] = old_val1; /* low */
comparandResult[1] = old_val2; /* high */
return _InterlockedCompareExchange128((volatile __int64 *)addr,