summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorIvan Maidanski <ivmai@mail.ru>2022-02-14 08:18:08 +0300
committerIvan Maidanski <ivmai@mail.ru>2022-02-14 11:39:14 +0300
commit108720a92507e699370093754b3baed39d4c341c (patch)
treee494d1432deeb2c4a4f0e6b31e5e619e303d3c69
parent152e04c935ed5eb3800768636796d8e7151e072d (diff)
downloadlibatomic_ops-108720a92507e699370093754b3baed39d4c341c.tar.gz
Reformat atomic_ops_stack.c/h files
* README_stack.txt: Place quotes properly for "popped". * src/atomic_ops_stack.c [AO_USE_ALMOST_LOCK_FREE] (AO_stack_push_explicit_aux_release, PRECHECK, AO_load_next, AO_stack_pop_explicit_aux_acquire): Reformat comments, adjust code indentation. * src/atomic_ops_stack.c [!AO_USE_ALMOST_LOCK_FREE] (ptr, AO_stack_push_release, AO_stack_pop_acquire): Likewise. * src/atomic_ops_stack.h [AO_USE_ALMOST_LOCK_FREE] (AO_BL_SIZE, AO_stack_push_explicit_aux_release): Likewise. * src/atomic_ops_stack.h: Move copyright to be the first comment in the file.
-rw-r--r--README_stack.txt2
-rw-r--r--src/atomic_ops_stack.c516
-rw-r--r--src/atomic_ops_stack.h198
3 files changed, 353 insertions, 363 deletions
diff --git a/README_stack.txt b/README_stack.txt
index 0c1bba6..66c167c 100644
--- a/README_stack.txt
+++ b/README_stack.txt
@@ -60,7 +60,7 @@ AO_t * AO_stack_pop_acquire(volatile AO_stack_t *list);
We require that the objects pushed as list elements remain addressable
as long as any push or pop operation are in progress. (It is OK for an object
-to be "pop"ped off a stack and "deallocated" with a concurrent "pop" on
+to be "popped" off a stack and "deallocated" with a concurrent "pop" on
the same stack still in progress, but only if "deallocation" leaves the
object addressable. The second "pop" may still read the object, but
the value it reads will not matter.)
diff --git a/src/atomic_ops_stack.c b/src/atomic_ops_stack.c
index c989021..2067361 100644
--- a/src/atomic_ops_stack.c
+++ b/src/atomic_ops_stack.c
@@ -43,306 +43,296 @@
# ifdef __cplusplus
extern "C" {
# endif
-
AO_API void AO_pause(int); /* defined in atomic_ops.c */
-
# ifdef __cplusplus
} /* extern "C" */
# endif
-/* LIFO linked lists based on compare-and-swap. We need to avoid */
-/* the case of a node deletion and reinsertion while I'm deleting */
-/* it, since that may cause my CAS to succeed eventhough the next */
-/* pointer is now wrong. Our solution is not fully lock-free, but it */
-/* is good enough for signal handlers, provided we have a suitably low */
-/* bound on the number of recursive signal handler reentries. */
-/* A list consists of a first pointer and a blacklist */
-/* of pointer values that are currently being removed. No list element */
-/* on the blacklist may be inserted. If we would otherwise do so, we */
-/* are allowed to insert a variant that differs only in the least */
-/* significant, ignored, bits. If the list is full, we wait. */
-
-/* Crucial observation: A particular padded pointer x (i.e. pointer */
-/* plus arbitrary low order bits) can never be newly inserted into */
-/* a list while it's in the corresponding auxiliary data structure. */
-
-/* The second argument is a pointer to the link field of the element */
-/* to be inserted. */
-/* Both list headers and link fields contain "perturbed" pointers, i.e. */
-/* pointers with extra bits "or"ed into the low order bits. */
-AO_API void AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x,
- AO_stack_aux *a)
-{
- AO_t x_bits = (AO_t)x;
- AO_t next;
-
- /* No deletions of x can start here, since x is not currently in the */
- /* list. */
- retry:
+ /* LIFO linked lists based on compare-and-swap. We need to avoid */
+ /* the case of a node deletion and reinsertion while I'm deleting */
+ /* it, since that may cause my CAS to succeed eventhough the next */
+ /* pointer is now wrong. Our solution is not fully lock-free, but it */
+ /* is good enough for signal handlers, provided we have a suitably */
+ /* low bound on the number of recursive signal handler reentries. */
+ /* A list consists of a first pointer and a blacklist of pointer */
+ /* values that are currently being removed. No list element on */
+ /* the blacklist may be inserted. If we would otherwise do so, we */
+ /* are allowed to insert a variant that differs only in the least */
+ /* significant, ignored, bits. If the list is full, we wait. */
+
+ /* Crucial observation: A particular padded pointer x (i.e. pointer */
+ /* plus arbitrary low order bits) can never be newly inserted into */
+ /* a list while it's in the corresponding auxiliary data structure. */
+
+ /* The second argument is a pointer to the link field of the element */
+ /* to be inserted. */
+ /* Both list headers and link fields contain "perturbed" pointers, */
+ /* i.e. pointers with extra bits or'ed into the low order bits. */
+ AO_API void AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x,
+ AO_stack_aux *a)
{
-# if AO_BL_SIZE == 2
- /* Start all loads as close to concurrently as possible. */
- AO_t entry1 = AO_load(&a->AO_stack_bl[0]);
- AO_t entry2 = AO_load(&a->AO_stack_bl[1]);
- if (entry1 == x_bits || entry2 == x_bits)
-# else
- int i;
- for (i = 0; i < AO_BL_SIZE; ++i)
- if (AO_load(&a->AO_stack_bl[i]) == x_bits)
-# endif
- {
- /* Entry is currently being removed. Change it a little. */
+ AO_t x_bits = (AO_t)x;
+ AO_t next;
+
+ /* No deletions of x can start here, since x is not */
+ /* currently in the list. */
+ retry:
+ {
+# if AO_BL_SIZE == 2
+ /* Start all loads as close to concurrently as possible. */
+ AO_t entry1 = AO_load(&a->AO_stack_bl[0]);
+ AO_t entry2 = AO_load(&a->AO_stack_bl[1]);
+ if (entry1 == x_bits || entry2 == x_bits)
+# else
+ int i;
+ for (i = 0; i < AO_BL_SIZE; ++i)
+ if (AO_load(&a->AO_stack_bl[i]) == x_bits)
+# endif
+ {
+ /* Entry is currently being removed. Change it a little. */
++x_bits;
if ((x_bits & AO_BIT_MASK) == 0)
- /* Version count overflowed; */
- /* EXTREMELY unlikely, but possible. */
+ /* Version count overflowed; EXTREMELY unlikely, but possible. */
x_bits = (AO_t)x;
- goto retry;
+ goto retry;
+ }
+ }
+
+ /* x_bits is not currently being deleted */
+ do
+ {
+ next = AO_load(list);
+ store_before_cas(x, next);
}
+ while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, next, x_bits)));
}
- /* x_bits is not currently being deleted */
- do
+ /* I concluded experimentally that checking a value first before */
+ /* performing a compare-and-swap is usually beneficial on x86, but */
+ /* slows things down appreciably with contention on Itanium. */
+ /* Since the Itanium behavior makes more sense to me (more cache line */
+ /* movement unless we're mostly reading, but back-off should guard */
+ /* against that), we take Itanium as the default. Measurements on */
+ /* other multiprocessor architectures would be useful. */
+ /* (On a uniprocessor, the initial check is almost certainly a very */
+ /* small loss.) - HB */
+# ifdef __i386__
+# define PRECHECK(a) (a) == 0 &&
+# else
+# define PRECHECK(a)
+# endif
+
+ /* This function is used before CAS in the below AO_stack_pop and the */
+ /* data race (reported by TSan) is OK because it results in a retry. */
+# ifdef AO_THREAD_SANITIZER
+ AO_ATTR_NO_SANITIZE_THREAD
+ static AO_t AO_load_next(const volatile AO_t *first_ptr)
{
- next = AO_load(list);
- store_before_cas(x, next);
+ /* Assuming an architecture on which loads of word type are */
+ /* atomic. AO_load cannot be used here because it cannot be */
+ /* instructed to suppress the warning about the race. */
+ return *first_ptr;
}
- while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, next, x_bits)));
-}
-
-/*
- * I concluded experimentally that checking a value first before
- * performing a compare-and-swap is usually beneficial on X86, but
- * slows things down appreciably with contention on Itanium.
- * Since the Itanium behavior makes more sense to me (more cache line
- * movement unless we're mostly reading, but back-off should guard
- * against that), we take Itanium as the default. Measurements on
- * other multiprocessor architectures would be useful. (On a uniprocessor,
- * the initial check is almost certainly a very small loss.) - HB
- */
-#ifdef __i386__
-# define PRECHECK(a) (a) == 0 &&
-#else
-# define PRECHECK(a)
-#endif
+# else
+# define AO_load_next AO_load
+# endif
-/* This function is used before CAS in the below AO_stack_pop() and the */
-/* data race (reported by TSan) is OK because it results in a retry. */
-#ifdef AO_THREAD_SANITIZER
- AO_ATTR_NO_SANITIZE_THREAD
- static AO_t AO_load_next(const volatile AO_t *first_ptr)
+ AO_API AO_t *AO_stack_pop_explicit_aux_acquire(volatile AO_t *list,
+ AO_stack_aux *a)
{
- /* Assuming an architecture on which loads of word type are atomic. */
- /* AO_load cannot be used here because it cannot be instructed to */
- /* suppress the warning about the race. */
- return *first_ptr;
- }
-#else
-# define AO_load_next AO_load
-#endif
+ unsigned i;
+ int j = 0;
+ AO_t first;
+ AO_t * first_ptr;
+ AO_t next;
-AO_API AO_t *AO_stack_pop_explicit_aux_acquire(volatile AO_t *list,
- AO_stack_aux *a)
-{
- unsigned i;
- int j = 0;
- AO_t first;
- AO_t * first_ptr;
- AO_t next;
-
- retry:
- first = AO_load(list);
- if (0 == first) return 0;
- /* Insert first into aux black list. */
- /* This may spin if more than AO_BL_SIZE removals using auxiliary */
- /* structure a are currently in progress. */
- for (i = 0; ; )
- {
- if (PRECHECK(a -> AO_stack_bl[i])
- AO_compare_and_swap_acquire(a->AO_stack_bl+i, 0, first))
- break;
- ++i;
- if ( i >= AO_BL_SIZE )
- {
- i = 0;
- AO_pause(++j);
- }
- }
- assert(i < AO_BL_SIZE);
-# ifndef AO_THREAD_SANITIZER
- assert(a -> AO_stack_bl[i] == first);
+ retry:
+ first = AO_load(list);
+ if (0 == first) return 0;
+ /* Insert first into aux black list. */
+ /* This may spin if more than AO_BL_SIZE removals using auxiliary */
+ /* structure a are currently in progress. */
+ for (i = 0; ; )
+ {
+ if (PRECHECK(a -> AO_stack_bl[i])
+ AO_compare_and_swap_acquire(a->AO_stack_bl+i, 0, first))
+ break;
+ if (++i >= AO_BL_SIZE)
+ {
+ i = 0;
+ AO_pause(++j);
+ }
+ }
+ assert(i < AO_BL_SIZE);
+# ifndef AO_THREAD_SANITIZER
+ assert(a -> AO_stack_bl[i] == first);
/* No actual race with the above CAS. */
-# endif
- /* First is on the auxiliary black list. It may be removed by */
- /* another thread before we get to it, but a new insertion of x */
- /* cannot be started here. */
- /* Only we can remove it from the black list. */
- /* We need to make sure that first is still the first entry on the */
- /* list. Otherwise it's possible that a reinsertion of it was */
- /* already started before we added the black list entry. */
-# if defined(__alpha__) && (__GNUC__ == 4)
- if (first != AO_load_acquire(list))
+# endif
+ /* First is on the auxiliary black list. It may be removed by */
+ /* another thread before we get to it, but a new insertion of x */
+ /* cannot be started here. Only we can remove it from the black */
+ /* list. We need to make sure that first is still the first entry */
+ /* on the list. Otherwise it is possible that a reinsertion of it */
+ /* was already started before we added the black list entry. */
+# if defined(__alpha__) && (__GNUC__ == 4)
+ if (first != AO_load_acquire(list))
/* Workaround __builtin_expect bug found in */
/* gcc-4.6.3/alpha causing test_stack failure. */
-# else
- if (AO_EXPECT_FALSE(first != AO_load_acquire(list)))
+# else
+ if (AO_EXPECT_FALSE(first != AO_load_acquire(list)))
/* Workaround test failure on AIX, at least, by */
/* using acquire ordering semantics for this */
/* load. Probably, it is not the right fix. */
-# endif
- {
- AO_store_release(a->AO_stack_bl+i, 0);
- goto retry;
- }
- first_ptr = AO_REAL_NEXT_PTR(first);
- next = AO_load_next(first_ptr);
-# if defined(__alpha__) && (__GNUC__ == 4)
- if (!AO_compare_and_swap_release(list, first, next))
-# else
- if (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, first, next)))
-# endif
- {
+# endif
+ {
+ AO_store_release(a->AO_stack_bl+i, 0);
+ goto retry;
+ }
+ first_ptr = AO_REAL_NEXT_PTR(first);
+ next = AO_load_next(first_ptr);
+# if defined(__alpha__) && (__GNUC__ == 4)
+ if (!AO_compare_and_swap_release(list, first, next))
+# else
+ if (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, first, next)))
+# endif
+ {
+ AO_store_release(a->AO_stack_bl+i, 0);
+ goto retry;
+ }
+# ifndef AO_THREAD_SANITIZER
+ assert(*list != first); /* No actual race with the above CAS. */
+# endif
+ /* Since we never insert an entry on the black list, this cannot */
+ /* have succeeded unless first remained on the list while we were */
+ /* running. Thus, its next link cannot have changed out from under */
+ /* us, and we removed exactly one entry and preserved the rest of */
+ /* the list. Note that it is quite possible that an additional */
+ /* entry was inserted and removed while we were running; this is OK */
+ /* since the part of the list following first must have remained */
+ /* unchanged, and first must again have been at the head of the */
+ /* list when the compare_and_swap succeeded. */
AO_store_release(a->AO_stack_bl+i, 0);
- goto retry;
+ return first_ptr;
}
-# ifndef AO_THREAD_SANITIZER
- assert(*list != first); /* No actual race with the above CAS. */
-# endif
- /* Since we never insert an entry on the black list, this cannot have */
- /* succeeded unless first remained on the list while we were running. */
- /* Thus its next link cannot have changed out from under us, and we */
- /* removed exactly one entry and preserved the rest of the list. */
- /* Note that it is quite possible that an additional entry was */
- /* inserted and removed while we were running; this is OK since the */
- /* part of the list following first must have remained unchanged, and */
- /* first must again have been at the head of the list when the */
- /* compare_and_swap succeeded. */
- AO_store_release(a->AO_stack_bl+i, 0);
- return first_ptr;
-}
#else /* ! USE_ALMOST_LOCK_FREE */
-/* The functionality is the same as of AO_load_next but the atomicity */
-/* is not needed. The usage is similar to that of store_before_cas. */
-#if defined(AO_THREAD_SANITIZER) \
- && (defined(AO_HAVE_compare_and_swap_double) \
- || defined(AO_HAVE_compare_double_and_swap_double))
- /* TODO: If compiled by Clang (as of clang-4.0) with -O3 flag, */
- /* no_sanitize attribute is ignored unless the argument is volatile. */
-# if defined(__clang__)
-# define LOAD_BEFORE_CAS_VOLATILE volatile
+ /* The functionality is the same as of AO_load_next but the atomicity */
+ /* is not needed. The usage is similar to that of store_before_cas. */
+# if defined(AO_THREAD_SANITIZER) \
+ && (defined(AO_HAVE_compare_and_swap_double) \
+ || defined(AO_HAVE_compare_double_and_swap_double))
+ /* TODO: If compiled by Clang (as of clang-4.0) with -O3 flag, */
+ /* no_sanitize attribute is ignored unless the argument is volatile.*/
+# if defined(__clang__)
+# define LOAD_BEFORE_CAS_VOLATILE volatile
+# else
+# define LOAD_BEFORE_CAS_VOLATILE /* empty */
+# endif
+ AO_ATTR_NO_SANITIZE_THREAD
+ static AO_t load_before_cas(const LOAD_BEFORE_CAS_VOLATILE AO_t *addr)
+ {
+ return *addr;
+ }
# else
-# define LOAD_BEFORE_CAS_VOLATILE /* empty */
-# endif
- AO_ATTR_NO_SANITIZE_THREAD
- static AO_t load_before_cas(const LOAD_BEFORE_CAS_VOLATILE AO_t *addr)
- {
- return *addr;
- }
-#else
-# define load_before_cas(addr) (*(addr))
-#endif
-
-/* Better names for fields in AO_stack_t */
-#define ptr AO_val2
-#define version AO_val1
+# define load_before_cas(addr) (*(addr))
+# endif /* !AO_THREAD_SANITIZER */
-#if defined(AO_HAVE_compare_double_and_swap_double) \
- && !(defined(AO_STACK_PREFER_CAS_DOUBLE) \
- && defined(AO_HAVE_compare_and_swap_double))
+ /* Better names for fields in AO_stack_t. */
+# define ptr AO_val2
+# define version AO_val1
-#ifdef LINT2
- volatile /* non-static */ AO_t AO_noop_sink;
-#endif
-
-AO_API void AO_stack_push_release(AO_stack_t *list, AO_t *element)
-{
- AO_t next;
+# if defined(AO_HAVE_compare_double_and_swap_double) \
+ && !(defined(AO_STACK_PREFER_CAS_DOUBLE) \
+ && defined(AO_HAVE_compare_and_swap_double))
- do {
- next = AO_load(&(list -> ptr));
- store_before_cas(element, next);
- } while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(&(list -> ptr),
- next, (AO_t)element)));
- /* This uses a narrow CAS here, an old optimization suggested */
- /* by Treiber. Pop is still safe, since we run into the ABA */
- /* problem only if there were both intervening "pop"s and "push"es. */
- /* In that case we still see a change in the version number. */
# ifdef LINT2
- /* Instruct static analyzer that element is not lost. */
- AO_noop_sink = (AO_t)element;
+ volatile /* non-static */ AO_t AO_noop_sink;
# endif
-}
-AO_API AO_t *AO_stack_pop_acquire(AO_stack_t *list)
-{
-# if defined(__clang__) && !AO_CLANG_PREREQ(3, 5)
- AO_t *volatile cptr;
+ AO_API void AO_stack_push_release(AO_stack_t *list, AO_t *element)
+ {
+ AO_t next;
+
+ do {
+ next = AO_load(&list->ptr);
+ store_before_cas(element, next);
+ } while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(&list->ptr, next,
+ (AO_t)element)));
+ /* This uses a narrow CAS here, an old optimization suggested */
+ /* by Treiber. Pop is still safe, since we run into the ABA */
+ /* problem only if there were both intervening pops and pushes. */
+ /* In that case we still see a change in the version number. */
+# ifdef LINT2
+ /* Instruct static analyzer that element is not lost. */
+ AO_noop_sink = (AO_t)element;
+# endif
+ }
+
+ AO_API AO_t *AO_stack_pop_acquire(AO_stack_t *list)
+ {
+# if defined(__clang__) && !AO_CLANG_PREREQ(3, 5)
+ AO_t *volatile cptr;
/* Use volatile to workaround a bug in */
/* clang-1.1/x86 causing test_stack failure. */
-# else
+# else
+ AO_t *cptr;
+# endif
+ AO_t next;
+ AO_t cversion;
+
+ do {
+ /* Version must be loaded first. */
+ cversion = AO_load_acquire(&list->version);
+ cptr = (AO_t *)AO_load(&list->ptr);
+ if (NULL == cptr)
+ return NULL;
+ next = load_before_cas((AO_t *)cptr);
+ } while (AO_EXPECT_FALSE(!AO_compare_double_and_swap_double_release(list,
+ cversion, (AO_t)cptr,
+ cversion+1, (AO_t)next)));
+ return cptr;
+ }
+
+# elif defined(AO_HAVE_compare_and_swap_double)
+
+ /* Needed for future IA64 processors. No current clients? */
+ /* TODO: Not tested thoroughly. */
+
+  /* We have a wide CAS, but it only does an AO_t-wide comparison.    */
+ /* We cannot use the Treiber optimization, since we only check */
+ /* for an unchanged version number, not an unchanged pointer. */
+ AO_API void AO_stack_push_release(AO_stack_t *list, AO_t *element)
+ {
+ AO_t version;
+
+ do {
+ AO_t next_ptr;
+
+ /* Again version must be loaded first, for different reason. */
+ version = AO_load_acquire(&list->version);
+ next_ptr = AO_load(&list->ptr);
+ store_before_cas(element, next_ptr);
+ } while (!AO_compare_and_swap_double_release(list, version, version+1,
+ (AO_t)element));
+ }
+
+ AO_API AO_t *AO_stack_pop_acquire(AO_stack_t *list)
+ {
AO_t *cptr;
-# endif
- AO_t next;
- AO_t cversion;
-
- do {
- /* Version must be loaded first. */
- cversion = AO_load_acquire(&(list -> version));
- cptr = (AO_t *)AO_load(&(list -> ptr));
- if (NULL == cptr)
- return NULL;
- next = load_before_cas((AO_t *)cptr);
- } while (AO_EXPECT_FALSE(!AO_compare_double_and_swap_double_release(list,
- cversion, (AO_t)cptr,
- cversion+1, (AO_t)next)));
- return cptr;
-}
-
-
-#elif defined(AO_HAVE_compare_and_swap_double)
-
-/* Needed for future IA64 processors. No current clients? */
-/* TODO: Not tested thoroughly. */
-
-/* We have a wide CAS, but only does an AO_t-wide comparison. */
-/* We can't use the Treiber optimization, since we only check */
-/* for an unchanged version number, not an unchanged pointer. */
-AO_API void AO_stack_push_release(AO_stack_t *list, AO_t *element)
-{
- AO_t version;
-
- do {
- AO_t next_ptr;
-
- /* Again version must be loaded first, for different reason. */
- version = AO_load_acquire(&(list -> version));
- next_ptr = AO_load(&(list -> ptr));
- store_before_cas(element, next_ptr);
- } while (!AO_compare_and_swap_double_release(
- list, version,
- version+1, (AO_t) element));
-}
-
-AO_API AO_t *AO_stack_pop_acquire(AO_stack_t *list)
-{
- AO_t *cptr;
- AO_t next;
- AO_t cversion;
-
- do {
- cversion = AO_load_acquire(&(list -> version));
- cptr = (AO_t *)AO_load(&(list -> ptr));
- if (NULL == cptr)
- return NULL;
- next = load_before_cas(cptr);
- } while (!AO_compare_double_and_swap_double_release(list,
- cversion, (AO_t)cptr,
- cversion+1, next));
- return cptr;
-}
-
-#endif /* AO_HAVE_compare_and_swap_double */
+ AO_t next;
+ AO_t cversion;
+
+ do {
+ cversion = AO_load_acquire(&list->version);
+ cptr = (AO_t *)AO_load(&list->ptr);
+ if (NULL == cptr)
+ return NULL;
+ next = load_before_cas(cptr);
+ } while (!AO_compare_double_and_swap_double_release(list,
+ cversion, (AO_t)cptr, cversion+1, next));
+ return cptr;
+ }
+# endif /* AO_HAVE_compare_and_swap_double */
#endif /* ! USE_ALMOST_LOCK_FREE */
diff --git a/src/atomic_ops_stack.h b/src/atomic_ops_stack.h
index d71f22c..cf231ab 100644
--- a/src/atomic_ops_stack.h
+++ b/src/atomic_ops_stack.h
@@ -1,9 +1,4 @@
/*
- * The implementation of the routines described here is covered by the GPL.
- * This header file is covered by the following license:
- */
-
-/*
* Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -25,6 +20,11 @@
* SOFTWARE.
*/
+/*
+ * The implementation of the routines described here is covered by the GPL.
+ * This header file is covered by the above license.
+ */
+
/* Almost lock-free LIFO linked lists (linked stacks). */
#ifndef AO_STACK_H
#define AO_STACK_H
@@ -74,8 +74,9 @@
*/
#ifdef AO_USE_ALMOST_LOCK_FREE
-/* The number of low order pointer bits we can use for a small */
-/* version number. */
+
+ /* The number of low order pointer bits we can use for a small */
+ /* version number. */
# if defined(__LP64__) || defined(_LP64) || defined(_WIN64)
# define AO_N_BITS 3
# else
@@ -83,112 +84,111 @@
# endif
# define AO_BIT_MASK ((1 << AO_N_BITS) - 1)
-/*
- * AO_stack_aux should be treated as opaque.
- * It is fully defined here, so it can be allocated, and to facilitate
- * debugging.
- */
-#ifndef AO_BL_SIZE
-# define AO_BL_SIZE 2
-#endif
-
-#if AO_BL_SIZE > (1 << AO_N_BITS)
-# error AO_BL_SIZE too big
-#endif
-typedef struct AO__stack_aux {
- volatile AO_t AO_stack_bl[AO_BL_SIZE];
-} AO_stack_aux;
-
-/* The stack implementation knows only about the location of */
-/* link fields in nodes, and nothing about the rest of the */
-/* stack elements. Link fields hold an AO_t, which is not */
-/* necessarily a real pointer. This converts the AO_t to a */
-/* real (AO_t *) which is either NULL, or points at the link */
-/* field in the next node. */
-#define AO_REAL_NEXT_PTR(x) (AO_t *)((x) & ~AO_BIT_MASK)
-
-/* The following two routines should not normally be used directly. */
-/* We make them visible here for the rare cases in which it makes sense */
-/* to share the an AO_stack_aux between stacks. */
-AO_API void
-AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x,
- AO_stack_aux *);
-
-AO_API AO_t *
-AO_stack_pop_explicit_aux_acquire(volatile AO_t *list, AO_stack_aux *);
-
-/* And now AO_stack_t for the real interface: */
-
-typedef struct AO__stack {
- volatile AO_t AO_ptr;
- AO_stack_aux AO_aux;
-} AO_stack_t;
-
-#define AO_STACK_INITIALIZER {0,{{0}}}
-
-AO_INLINE void AO_stack_init(AO_stack_t *list)
-{
-# if AO_BL_SIZE == 2
- list -> AO_aux.AO_stack_bl[0] = 0;
- list -> AO_aux.AO_stack_bl[1] = 0;
-# else
- int i;
- for (i = 0; i < AO_BL_SIZE; ++i)
- list -> AO_aux.AO_stack_bl[i] = 0;
+ /* AO_stack_aux should be treated as opaque. It is fully defined */
+ /* here, so it can be allocated, and to facilitate debugging. */
+# ifndef AO_BL_SIZE
+# define AO_BL_SIZE 2
# endif
- list -> AO_ptr = 0;
-}
-/* Convert an AO_stack_t to a pointer to the link field in */
-/* the first element. */
-#define AO_REAL_HEAD_PTR(x) AO_REAL_NEXT_PTR((x).AO_ptr)
+# if AO_BL_SIZE > (1 << AO_N_BITS)
+# error AO_BL_SIZE too big
+# endif
-#define AO_stack_push_release(l, e) \
+ typedef struct AO__stack_aux {
+ volatile AO_t AO_stack_bl[AO_BL_SIZE];
+ } AO_stack_aux;
+
+ /* The stack implementation knows only about the location of */
+ /* link fields in nodes, and nothing about the rest of the */
+ /* stack elements. Link fields hold an AO_t, which is not */
+ /* necessarily a real pointer. This converts the AO_t to a */
+ /* real (AO_t *) which is either NULL, or points at the link */
+ /* field in the next node. */
+# define AO_REAL_NEXT_PTR(x) (AO_t *)((x) & ~AO_BIT_MASK)
+
+ /* The following two routines should not normally be used directly. */
+ /* We make them visible here for the rare cases in which it makes */
+  /* sense to share an AO_stack_aux between stacks.                   */
+ AO_API void
+ AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x,
+ AO_stack_aux *);
+
+ AO_API AO_t *
+ AO_stack_pop_explicit_aux_acquire(volatile AO_t *list, AO_stack_aux *);
+
+ /* And now AO_stack_t for the real interface: */
+
+ typedef struct AO__stack {
+ volatile AO_t AO_ptr;
+ AO_stack_aux AO_aux;
+ } AO_stack_t;
+
+# define AO_STACK_INITIALIZER {0,{{0}}}
+
+ AO_INLINE void AO_stack_init(AO_stack_t *list)
+ {
+# if AO_BL_SIZE == 2
+ list -> AO_aux.AO_stack_bl[0] = 0;
+ list -> AO_aux.AO_stack_bl[1] = 0;
+# else
+ int i;
+ for (i = 0; i < AO_BL_SIZE; ++i)
+ list -> AO_aux.AO_stack_bl[i] = 0;
+# endif
+ list -> AO_ptr = 0;
+ }
+
+ /* Convert an AO_stack_t to a pointer to the link field in */
+ /* the first element. */
+# define AO_REAL_HEAD_PTR(x) AO_REAL_NEXT_PTR((x).AO_ptr)
+
+# define AO_stack_push_release(l, e) \
AO_stack_push_explicit_aux_release(&((l)->AO_ptr), e, &((l)->AO_aux))
-#define AO_HAVE_stack_push_release
+# define AO_HAVE_stack_push_release
-#define AO_stack_pop_acquire(l) \
+# define AO_stack_pop_acquire(l) \
AO_stack_pop_explicit_aux_acquire(&((l)->AO_ptr), &((l)->AO_aux))
-#define AO_HAVE_stack_pop_acquire
-
-# else /* Use fully non-blocking data structure, wide CAS */
-
-#ifndef AO_HAVE_double_t
- /* Can happen if we're using CAS emulation, since we don't want to */
- /* force that here, in case other atomic_ops clients don't want it. */
-# ifdef __cplusplus
- } /* extern "C" */
-# endif
-# include "atomic_ops/sysdeps/standard_ao_double_t.h"
-# ifdef __cplusplus
- extern "C" {
+# define AO_HAVE_stack_pop_acquire
+
+#else /* Use fully non-blocking data structure, wide CAS. */
+
+# ifndef AO_HAVE_double_t
+ /* Can happen if we are using CAS emulation, since we don't want to */
+ /* force that here, in case other atomic_ops clients don't want it. */
+# ifdef __cplusplus
+ } /* extern "C" */
+# endif
+# include "atomic_ops/sysdeps/standard_ao_double_t.h"
+# ifdef __cplusplus
+ extern "C" {
+# endif
# endif
-#endif
-typedef volatile AO_double_t AO_stack_t;
-/* AO_val1 is version, AO_val2 is pointer. */
-/* Note: AO_stack_t variables are not intended to be local ones, */
-/* otherwise it is the client responsibility to ensure they have */
-/* double-word alignment. */
+ typedef volatile AO_double_t AO_stack_t;
+ /* AO_val1 is version, AO_val2 is pointer. */
+ /* Note: AO_stack_t variables are not intended to be local ones, */
+ /* otherwise it is the client responsibility to ensure they have */
+ /* double-word alignment. */
+
+# define AO_STACK_INITIALIZER AO_DOUBLE_T_INITIALIZER
-#define AO_STACK_INITIALIZER AO_DOUBLE_T_INITIALIZER
+ AO_INLINE void AO_stack_init(AO_stack_t *list)
+ {
+ list -> AO_val1 = 0;
+ list -> AO_val2 = 0;
+ }
-AO_INLINE void AO_stack_init(AO_stack_t *list)
-{
- list -> AO_val1 = 0;
- list -> AO_val2 = 0;
-}
+# define AO_REAL_HEAD_PTR(x) (AO_t *)((x).AO_val2)
+# define AO_REAL_NEXT_PTR(x) (AO_t *)(x)
-#define AO_REAL_HEAD_PTR(x) (AO_t *)((x).AO_val2)
-#define AO_REAL_NEXT_PTR(x) (AO_t *)(x)
+ AO_API void AO_stack_push_release(AO_stack_t *list, AO_t *new_element);
+# define AO_HAVE_stack_push_release
-AO_API void AO_stack_push_release(AO_stack_t *list, AO_t *new_element);
-#define AO_HAVE_stack_push_release
-AO_API AO_t *AO_stack_pop_acquire(AO_stack_t *list);
-#define AO_HAVE_stack_pop_acquire
+ AO_API AO_t *AO_stack_pop_acquire(AO_stack_t *list);
+# define AO_HAVE_stack_pop_acquire
-#endif /* Wide CAS case */
+#endif /* !AO_USE_ALMOST_LOCK_FREE */
#if defined(AO_HAVE_stack_push_release) && !defined(AO_HAVE_stack_push)
# define AO_stack_push(l, e) AO_stack_push_release(l, e)