author    Yann Ylavic <ylavic@apache.org>  2023-02-14 11:18:00 +0000
committer Yann Ylavic <ylavic@apache.org>  2023-02-14 11:18:00 +0000
commit    f0fd0f04ce331c7b13d0541b94c9b941c5dfd6da (patch)
tree      fe4fc65dfd1651d9af56a22194278ccafba0cdb5
parent    63e9becc4992c035949731dcfdeb191a7860ad8f (diff)
download  apr-f0fd0f04ce331c7b13d0541b94c9b941c5dfd6da.tar.gz
configure: Test apr_uint64_t alignment for 64bit atomic builtins usability.
On some systems the __atomic builtins may be available only through
libatomic, or may fall back to libatomic when the atomic operations are
not issued on a suitably aligned address (for instance, 64bit atomics
on 8-byte aligned addresses only).

Modify the tests for HAVE_ATOMIC_BUILTINS64 and HAVE__ATOMIC_BUILTINS64
such that the address used for the atomic operations is not suitably
aligned (unless 64bit ints always have the suitable alignment, i.e.
mainly on 64bit systems). Also, use the __atomic_always_lock_free()
builtin to fail the test when the compiler already knows about the
alignment issue (falling back to libatomic, which we don't
require/want).

With this, the 64bit builtins should be selected only on platforms that
can natively handle atomics on any apr_uint64_t (since APR has no
dedicated 8-byte aligned 64bit type for now), while the generic/mutex
implementation is used on the others.

git-svn-id: https://svn.apache.org/repos/asf/apr/apr/trunk@1907642 13f79535-47bb-0310-9956-ffa450edef68
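To illustrate the probing idea outside configure: a 64bit member placed
right after a single char gets only the alignment the ABI guarantees
for uint64_t struct members (only 4 bytes on typical ILP32 targets),
and the GCC/Clang __atomic_always_lock_free() builtin then reports
whether 64bit atomics on such an address can stay lock-free without
libatomic. A minimal standalone sketch, not part of the patch (the
struct name and printed diagnostics are illustrative only):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct probe {
    char pad0;     /* pushes "val" off the start of the struct */
    uint64_t val;  /* alignment chosen by the ABI, not by the test */
};

int main(void)
{
    struct probe s;
    /* offsetof is 8 on most 64bit ABIs but 4 on i386, where uint64_t
     * struct members are only 4-byte aligned. */
    printf("offsetof(val) = %zu, _Alignof(uint64_t) = %zu\n",
           offsetof(struct probe, val), (size_t)_Alignof(uint64_t));
    /* Nonzero only if 8-byte atomics are always lock-free at this
     * address, i.e. no fallback to libatomic is ever needed. */
    printf("always lock-free: %d\n",
           __atomic_always_lock_free(sizeof(s.val), &s.val));
    return 0;
}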
-rw-r--r--  configure.in | 68
 1 file changed, 43 insertions(+), 25 deletions(-)
diff --git a/configure.in b/configure.in
index 2cdf5e33f..e47d8925b 100644
--- a/configure.in
+++ b/configure.in
@@ -572,31 +572,35 @@ AC_CACHE_CHECK([whether the compiler provides 64bit atomic builtins], [ap_cv_ato
 [AC_TRY_RUN([
 #if HAVE_STDINT_H
 #include <stdint.h>
+typedef uint64_t u64_t;
+#else
+typedef unsigned long long u64_t;
 #endif
 int main(int argc, const char *const *argv)
 {
-#if HAVE_STDINT_H
-    uint64_t val = 1010, tmp, *mem = &val;
-#else
-    unsigned long long val = 1010, tmp, *mem = &val;
-#endif
-
-    if (__sync_fetch_and_add(&val, 1010) != 1010 || val != 2020)
+    struct {
+        char pad0;
+        u64_t val;
+    } s;
+    u64_t *mem = &s.val, tmp;
+
+    s.val = 1010;
+    if (__sync_fetch_and_add(&s.val, 1010) != 1010 || s.val != 2020)
         return 1;
-    tmp = val;
-    if (__sync_fetch_and_sub(mem, 1010) != tmp || val != 1010)
+    tmp = s.val;
+    if (__sync_fetch_and_sub(mem, 1010) != tmp || s.val != 1010)
         return 1;
-    if (__sync_sub_and_fetch(&val, 1010) != 0 || val != 0)
+    if (__sync_sub_and_fetch(&s.val, 1010) != 0 || s.val != 0)
         return 1;
     tmp = 3030;
-    if (__sync_val_compare_and_swap(mem, 0, tmp) != 0 || val != tmp)
+    if (__sync_val_compare_and_swap(mem, 0, tmp) != 0 || s.val != tmp)
         return 1;
     __sync_synchronize();
-    if (__sync_lock_test_and_set(&val, 4040) != 3030)
+    if (__sync_lock_test_and_set(&s.val, 4040) != 3030)
         return 1;
     return 0;
@@ -606,31 +610,45 @@ AC_CACHE_CHECK([whether the compiler provides 64bit __atomic builtins], [ap_cv__
 [AC_TRY_RUN([
 #if HAVE_STDINT_H
 #include <stdint.h>
+typedef uint64_t u64_t;
+#else
+typedef unsigned long long u64_t;
 #endif
+static int test_always_lock_free(volatile u64_t *val)
+{
+    return __atomic_always_lock_free(sizeof(*val), val);
+}
 int main(int argc, const char *const *argv)
 {
-#if HAVE_STDINT_H
-    uint64_t val = 1010, tmp, *mem = &val;
-#else
-    unsigned long long val = 1010, tmp, *mem = &val;
-#endif
+    struct {
+        char pad0;
+        u64_t val;
+        char pad1;
+        u64_t tmp;
+    } s;
+    u64_t *mem = &s.val;
+
+    /* check if alignment matters (no fallback to libatomic) */
+    if (!test_always_lock_free(&s.val))
+        return 1;
-    if (__atomic_fetch_add(&val, 1010, __ATOMIC_SEQ_CST) != 1010 || val != 2020)
+    s.val = 1010;
+    if (__atomic_fetch_add(&s.val, 1010, __ATOMIC_SEQ_CST) != 1010 || s.val != 2020)
         return 1;
-    tmp = val;
-    if (__atomic_fetch_sub(mem, 1010, __ATOMIC_SEQ_CST) != tmp || val != 1010)
+    s.tmp = s.val;
+    if (__atomic_fetch_sub(mem, 1010, __ATOMIC_SEQ_CST) != s.tmp || s.val != 1010)
         return 1;
-    if (__atomic_sub_fetch(&val, 1010, __ATOMIC_SEQ_CST) != 0 || val != 0)
+    if (__atomic_sub_fetch(&s.val, 1010, __ATOMIC_SEQ_CST) != 0 || s.val != 0)
         return 1;
-    tmp = val;
-    if (!__atomic_compare_exchange_n(mem, &tmp, 3030, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
-        || tmp != 0)
+    s.tmp = s.val;
+    if (!__atomic_compare_exchange_n(mem, &s.tmp, 3030, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+        || s.tmp != 0)
         return 1;
-    if (__atomic_exchange_n(&val, 4040, __ATOMIC_SEQ_CST) != 3030)
+    if (__atomic_exchange_n(&s.val, 4040, __ATOMIC_SEQ_CST) != 3030)
         return 1;
     return 0;
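When the run-test above fails, the HAVE__ATOMIC_BUILTINS64 define stays
unset and, as the commit message notes, the generic/mutex
implementation is used instead. Conceptually that fallback serializes
each 64bit operation under a lock; a hypothetical sketch (invented
names, not APR's actual source):

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Mutex-based stand-in for a 64bit fetch-and-add: correct for any
 * alignment of *mem, at the cost of contention on a shared lock. */
static uint64_t mutex_fetch_add64(volatile uint64_t *mem, uint64_t val)
{
    uint64_t old;
    pthread_mutex_lock(&lock);
    old = *mem;
    *mem += val;
    pthread_mutex_unlock(&lock);
    return old;
}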