Diffstat (limited to 'deps/jemalloc/include/jemalloc/internal/prng.h')
 deps/jemalloc/include/jemalloc/internal/prng.h | 93 +++++++++++++------------------
 1 file changed, 38 insertions(+), 55 deletions(-)
diff --git a/deps/jemalloc/include/jemalloc/internal/prng.h b/deps/jemalloc/include/jemalloc/internal/prng.h
index 15cc2d18f..14542aa12 100644
--- a/deps/jemalloc/include/jemalloc/internal/prng.h
+++ b/deps/jemalloc/include/jemalloc/internal/prng.h
@@ -1,7 +1,6 @@
#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H
-#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
/*
@@ -59,66 +58,38 @@ prng_state_next_zu(size_t state) {
/*
* The prng_lg_range functions give a uniform int in the half-open range [0,
- * 2**lg_range). If atomic is true, they do so safely from multiple threads.
- * Multithreaded 64-bit prngs aren't supported.
+ * 2**lg_range).
*/
JEMALLOC_ALWAYS_INLINE uint32_t
-prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
- uint32_t ret, state0, state1;
-
+prng_lg_range_u32(uint32_t *state, unsigned lg_range) {
assert(lg_range > 0);
assert(lg_range <= 32);
- state0 = atomic_load_u32(state, ATOMIC_RELAXED);
-
- if (atomic) {
- do {
- state1 = prng_state_next_u32(state0);
- } while (!atomic_compare_exchange_weak_u32(state, &state0,
- state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
- } else {
- state1 = prng_state_next_u32(state0);
- atomic_store_u32(state, state1, ATOMIC_RELAXED);
- }
- ret = state1 >> (32 - lg_range);
+ *state = prng_state_next_u32(*state);
+ uint32_t ret = *state >> (32 - lg_range);
return ret;
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
- uint64_t ret, state1;
-
assert(lg_range > 0);
assert(lg_range <= 64);
- state1 = prng_state_next_u64(*state);
- *state = state1;
- ret = state1 >> (64 - lg_range);
+ *state = prng_state_next_u64(*state);
+ uint64_t ret = *state >> (64 - lg_range);
return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
-prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
- size_t ret, state0, state1;
-
+prng_lg_range_zu(size_t *state, unsigned lg_range) {
assert(lg_range > 0);
assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
- state0 = atomic_load_zu(state, ATOMIC_RELAXED);
-
- if (atomic) {
- do {
- state1 = prng_state_next_zu(state0);
- } while (atomic_compare_exchange_weak_zu(state, &state0,
- state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
- } else {
- state1 = prng_state_next_zu(state0);
- atomic_store_zu(state, state1, ATOMIC_RELAXED);
- }
- ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
+ *state = prng_state_next_zu(*state);
+ size_t ret = *state >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
return ret;
}
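The hunk above removes the CAS loop entirely: each prng_lg_range_* variant now just steps a caller-owned state word and returns its top lg_range bits, which are the strongest bits an LCG produces (the low bits of a power-of-two-modulus LCG have short periods, hence the right shift rather than a mask). Below is a minimal self-contained sketch of the same pattern; the LCG constants are the Numerical Recipes ones rather than the ones this header defines, and state_next_u32/lg_range_u32 are illustrative names, not jemalloc's:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for prng_state_next_u32(); jemalloc uses its own LCG constants
 * defined earlier in this header. These are from Numerical Recipes, chosen
 * only so the sketch is self-contained.
 */
static uint32_t
state_next_u32(uint32_t state) {
    return state * 1664525U + 1013904223U;
}

/*
 * Mirrors the simplified prng_lg_range_u32(): advance the caller-owned
 * state, then keep the top lg_range bits.
 */
static uint32_t
lg_range_u32(uint32_t *state, unsigned lg_range) {
    *state = state_next_u32(*state);
    return *state >> (32 - lg_range);
}

int
main(void) {
    uint32_t state = 42;    /* caller-owned seed; one per thread in practice */
    for (int i = 0; i < 4; i++) {
        printf("%u\n", lg_range_u32(&state, 4));    /* values in [0, 16) */
    }
    return 0;
}
```

With the bool atomic parameter gone, a caller that previously shared one state word across threads must now synchronize on its own; callers with thread-local state get a plain load and store instead of relaxed atomics.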
@@ -129,18 +100,24 @@ prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
*/
JEMALLOC_ALWAYS_INLINE uint32_t
-prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
- uint32_t ret;
- unsigned lg_range;
-
- assert(range > 1);
+prng_range_u32(uint32_t *state, uint32_t range) {
+ assert(range != 0);
+ /*
+ * If range were 1, lg_range would be 0, so the shift in
+ * prng_lg_range_u32 would be a shift of a 32-bit variable by 32 bits,
+ * which is UB. Just handle this case as a one-off.
+ */
+ if (range == 1) {
+ return 0;
+ }
/* Compute the ceiling of lg(range). */
- lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
+ unsigned lg_range = ffs_u32(pow2_ceil_u32(range));
/* Generate a result in [0..range) via repeated trial. */
+ uint32_t ret;
do {
- ret = prng_lg_range_u32(state, lg_range, atomic);
+ ret = prng_lg_range_u32(state, lg_range);
} while (ret >= range);
return ret;
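The "repeated trial" loop above is rejection sampling: draw ceil(lg(range)) bits, giving a value in [0, 2**lg_range), and retry whenever it lands in [range, 2**lg_range). Because 2**lg_range < 2*range, each trial succeeds with probability greater than 1/2, so the expected number of draws is under two. A sketch in the same shape, reusing lg_range_u32() from the previous example; __builtin_clz is the GCC/Clang builtin standing in for the header's ffs_u32/pow2_ceil_u32 pair:

```c
/*
 * Rejection sampling as in prng_range_u32(): for range == 5 we draw from
 * [0, 8) and retry on 5, 6, 7.
 */
static uint32_t
range_u32(uint32_t *state, uint32_t range) {
    if (range <= 1) {
        return 0;    /* the range == 1 one-off: avoids a shift by 32 */
    }
    /* ceil(lg(range)): smallest k such that 2^k >= range. */
    unsigned lg_range = 32 - (unsigned)__builtin_clz(range - 1);
    uint32_t ret;
    do {
        ret = lg_range_u32(state, lg_range);
    } while (ret >= range);
    return ret;
}
```

Dropped into the earlier sketch, range_u32(&state, 5) returns values uniform over [0, 5).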
@@ -148,15 +125,18 @@ prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
- uint64_t ret;
- unsigned lg_range;
+ assert(range != 0);
- assert(range > 1);
+ /* See the note in prng_range_u32. */
+ if (range == 1) {
+ return 0;
+ }
/* Compute the ceiling of lg(range). */
- lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+ unsigned lg_range = ffs_u64(pow2_ceil_u64(range));
/* Generate a result in [0..range) via repeated trial. */
+ uint64_t ret;
do {
ret = prng_lg_range_u64(state, lg_range);
} while (ret >= range);
@@ -165,18 +145,21 @@ prng_range_u64(uint64_t *state, uint64_t range) {
}
JEMALLOC_ALWAYS_INLINE size_t
-prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
- size_t ret;
- unsigned lg_range;
+prng_range_zu(size_t *state, size_t range) {
+ assert(range != 0);
- assert(range > 1);
+ /* See the note in prng_range_u32. */
+ if (range == 1) {
+ return 0;
+ }
/* Compute the ceiling of lg(range). */
- lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+ unsigned lg_range = ffs_u64(pow2_ceil_u64(range));
/* Generate a result in [0..range) via repeated trial. */
+ size_t ret;
do {
- ret = prng_lg_range_zu(state, lg_range, atomic);
+ ret = prng_lg_range_zu(state, lg_range);
} while (ret >= range);
return ret;
}
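One reading note on the zu variant: the assert bound ZU(1) << (3 + LG_SIZEOF_PTR) is the bit width of size_t spelled with jemalloc's macros, since LG_SIZEOF_PTR is lg(sizeof(void *)) and adding 3 to the shift multiplies by 8. A tiny standalone check under that assumption, with lg_sizeof_ptr as a hypothetical stand-in computed from sizeof(void *):

```c
#include <assert.h>
#include <stddef.h>

int
main(void) {
    /* Stand-in for jemalloc's LG_SIZEOF_PTR: 3 on LP64, 2 on ILP32. */
    unsigned lg_sizeof_ptr = (sizeof(void *) == 8) ? 3 : 2;
    /* ZU(1) << (3 + LG_SIZEOF_PTR) == 8 * sizeof(void *). */
    size_t bits = (size_t)1 << (3 + lg_sizeof_ptr);
    /* Equals size_t's width on the ILP32/LP64 models jemalloc targets. */
    assert(bits == sizeof(size_t) * 8);
    return 0;
}
```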