author    Michael Cahill <michael.cahill@mongodb.com>    2015-08-19 12:04:55 +1000
committer Keith Bostic <keith@wiredtiger.com>            2015-08-19 11:45:31 -0400
commit    ba32b71f75c85dd6ff3558346d19154d037888ec (patch)
tree      3c3b90617c0a337f65d026bd13bc66ced3d0492d
parent    1c2542ff9337af935ce95a63c91cd2024694634e (diff)
download  mongo-ba32b71f75c85dd6ff3558346d19154d037888ec.tar.gz
Merge pull request #2112 from wiredtiger/wt-2025-inline-atomic-functions
WT-2025: inline atomic functions.

(cherry picked from commit ada57c1ed44ab0dd4f904f60a63bbc77333b2baa)
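The change replaces the size-suffixed WT_ATOMIC_* macros (WT_ATOMIC_ADD4, WT_ATOMIC_CAS8, ...) with typed inline functions that take a pointer to the target and encode the operand width in bits, with "i" and "v" name variants for signed and volatile operands (for example __wt_atomic_addi32, __wt_atomic_casv32) and __wt_atomic_cas_ptr for pointer swaps. A typical call-site change, shown here only to illustrate the new calling convention:

    /* Before: macro named by byte count, takes the variable itself. */
    (void)WT_ATOMIC_ADD8(cache->bytes_read, size);

    /* After: inline function named by bit width, takes a pointer. */
    (void)__wt_atomic_add64(&cache->bytes_read, size);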
-rw-r--r--  bench/wtperf/wtperf.c            6
-rw-r--r--  dist/s_define.list              22
-rw-r--r--  dist/s_string.ok                 2
-rw-r--r--  src/async/async_api.c           11
-rw-r--r--  src/async/async_op.c             4
-rw-r--r--  src/async/async_worker.c         6
-rw-r--r--  src/block/block_open.c           4
-rw-r--r--  src/btree/bt_delete.c           14
-rw-r--r--  src/btree/bt_page.c              4
-rw-r--r--  src/btree/bt_read.c              6
-rw-r--r--  src/btree/bt_split.c             8
-rw-r--r--  src/btree/row_key.c              5
-rw-r--r--  src/btree/row_modify.c           4
-rw-r--r--  src/evict/evict_lru.c           15
-rw-r--r--  src/include/async.h             34
-rw-r--r--  src/include/btmem.h             18
-rw-r--r--  src/include/btree.i             44
-rw-r--r--  src/include/cursor.i             4
-rw-r--r--  src/include/gcc.h              103
-rw-r--r--  src/include/hardware.h          12
-rw-r--r--  src/include/lint.h             100
-rw-r--r--  src/include/lsm.h                8
-rw-r--r--  src/include/msvc.h             102
-rw-r--r--  src/include/serial.i             6
-rw-r--r--  src/include/stat.h               4
-rw-r--r--  src/include/txn.i                7
-rw-r--r--  src/log/log_slot.c              16
-rw-r--r--  src/lsm/lsm_manager.c            4
-rw-r--r--  src/lsm/lsm_merge.c              8
-rw-r--r--  src/lsm/lsm_tree.c              31
-rw-r--r--  src/lsm/lsm_work_unit.c         10
-rw-r--r--  src/lsm/lsm_worker.c             2
-rw-r--r--  src/os_posix/os_mtx_cond.c       6
-rw-r--r--  src/os_posix/os_mtx_rw.c        10
-rw-r--r--  src/os_posix/os_open.c           4
-rw-r--r--  src/os_win/os_mtx_cond.c         6
-rw-r--r--  src/os_win/os_open.c             4
-rw-r--r--  src/reconcile/rec_write.c        8
-rw-r--r--  src/session/session_dhandle.c    6
-rw-r--r--  src/txn/txn.c                   12
40 files changed, 343 insertions(+), 337 deletions(-)
diff --git a/bench/wtperf/wtperf.c b/bench/wtperf/wtperf.c
index a4f679ae736..9ee7865aa0b 100644
--- a/bench/wtperf/wtperf.c
+++ b/bench/wtperf/wtperf.c
@@ -94,7 +94,7 @@ static uint64_t wtperf_value_range(CONFIG *);
static inline uint64_t
get_next_incr(CONFIG *cfg)
{
- return (WT_ATOMIC_ADD8(cfg->insert_key, 1));
+ return (__wt_atomic_add64(&cfg->insert_key, 1));
}
static inline void
@@ -154,7 +154,7 @@ cb_asyncop(WT_ASYNC_CALLBACK *cb, WT_ASYNC_OP *op, int ret, uint32_t flags)
switch (type) {
case WT_AOP_COMPACT:
tables = (uint32_t *)op->app_private;
- WT_ATOMIC_ADD4(*tables, (uint32_t)-1);
+ (void)__wt_atomic_add32(tables, (uint32_t)-1);
break;
case WT_AOP_INSERT:
trk = &thread->insert;
@@ -189,7 +189,7 @@ cb_asyncop(WT_ASYNC_CALLBACK *cb, WT_ASYNC_OP *op, int ret, uint32_t flags)
return (0);
if (ret == 0 || (ret == WT_NOTFOUND && type != WT_AOP_INSERT)) {
if (!cfg->in_warmup)
- (void)WT_ATOMIC_ADD8(trk->ops, 1);
+ (void)__wt_atomic_add64(&trk->ops, 1);
return (0);
}
err:
diff --git a/dist/s_define.list b/dist/s_define.list
index 1bceb6a54fb..65fc60e9c7a 100644
--- a/dist/s_define.list
+++ b/dist/s_define.list
@@ -17,20 +17,8 @@ TXN_API_CALL
TXN_API_CALL_NOCONF
TXN_API_END
WIN32_LEAN_AND_MEAN
-WT_ATOMIC_ADD1
-WT_ATOMIC_ADD2
-WT_ATOMIC_CAS1
-WT_ATOMIC_CAS2
-WT_ATOMIC_CAS_VAL1
-WT_ATOMIC_CAS_VAL2
-WT_ATOMIC_CAS_VAL4
-WT_ATOMIC_FETCH_ADD1
-WT_ATOMIC_FETCH_ADD2
-WT_ATOMIC_FETCH_ADD4
-WT_ATOMIC_STORE1
-WT_ATOMIC_STORE2
-WT_ATOMIC_SUB1
-WT_ATOMIC_SUB2
+WT_ATOMIC_CAS
+WT_ATOMIC_FUNC
WT_BARRIER
WT_BLOCK_DESC_SIZE
WT_CACHE_LINE_ALIGNMENT
@@ -67,9 +55,3 @@ WT_WITH_LOCK
__F
__WIREDTIGER_EXT_H_
__WIREDTIGER_H_
-__WT_ATOMIC_ADD
-__WT_ATOMIC_CAS
-__WT_ATOMIC_CAS_VAL
-__WT_ATOMIC_FETCH_ADD
-__WT_ATOMIC_STORE
-__WT_ATOMIC_SUB
diff --git a/dist/s_string.ok b/dist/s_string.ok
index 2bdd1d88a54..4dd42826b55 100644
--- a/dist/s_string.ok
+++ b/dist/s_string.ok
@@ -464,6 +464,7 @@ bzalloc
bzfree
bzip
calloc
+cas
catfmt
cb
cd
@@ -909,6 +910,7 @@ ps
pse
psp
pthread
+ptr
pushms
putK
putV
diff --git a/src/async/async_api.c b/src/async/async_api.c
index 9874d7aab00..cd232af5340 100644
--- a/src/async/async_api.c
+++ b/src/async/async_api.c
@@ -151,15 +151,16 @@ retry:
* If we can set the state then the op entry is ours.
* Start the next search at the next entry after this one.
*/
- if (!WT_ATOMIC_CAS4(op->state, WT_ASYNCOP_FREE, WT_ASYNCOP_READY)) {
+ if (!__wt_atomic_cas32(&op->state, WT_ASYNCOP_FREE, WT_ASYNCOP_READY)) {
WT_STAT_FAST_CONN_INCR(session, async_alloc_race);
goto retry;
}
WT_STAT_FAST_CONN_INCRV(session, async_alloc_view, view);
WT_RET(__async_get_format(conn, uri, config, op));
- op->unique_id = WT_ATOMIC_ADD8(async->op_id, 1);
+ op->unique_id = __wt_atomic_add64(&async->op_id, 1);
op->optype = WT_AOP_NONE;
- (void)WT_ATOMIC_STORE4(async->ops_index, (i + 1) % conn->async_size);
+ (void)__wt_atomic_store32(
+ &async->ops_index, (i + 1) % conn->async_size);
*opp = op;
return (0);
}
@@ -514,7 +515,7 @@ retry:
*/
__wt_sleep(0, 100000);
- if (!WT_ATOMIC_CAS4(async->flush_state, WT_ASYNC_FLUSH_NONE,
+ if (!__wt_atomic_cas32(&async->flush_state, WT_ASYNC_FLUSH_NONE,
WT_ASYNC_FLUSH_IN_PROGRESS))
goto retry;
/*
@@ -524,7 +525,7 @@ retry:
* things off the work queue with the lock.
*/
async->flush_count = 0;
- (void)WT_ATOMIC_ADD8(async->flush_gen, 1);
+ (void)__wt_atomic_add64(&async->flush_gen, 1);
WT_ASSERT(session, async->flush_op.state == WT_ASYNCOP_FREE);
async->flush_op.state = WT_ASYNCOP_READY;
WT_ERR(__wt_async_op_enqueue(session, &async->flush_op));
diff --git a/src/async/async_op.c b/src/async/async_op.c
index d0c58f584cc..7e1920933c2 100644
--- a/src/async/async_op.c
+++ b/src/async/async_op.c
@@ -280,7 +280,7 @@ __wt_async_op_enqueue(WT_SESSION_IMPL *session, WT_ASYNC_OP_IMPL *op)
* Enqueue op at the tail of the work queue.
* We get our slot in the ring buffer to use.
*/
- my_alloc = WT_ATOMIC_ADD8(async->alloc_head, 1);
+ my_alloc = __wt_atomic_add64(&async->alloc_head, 1);
my_slot = my_alloc % async->async_qsize;
/*
@@ -300,7 +300,7 @@ __wt_async_op_enqueue(WT_SESSION_IMPL *session, WT_ASYNC_OP_IMPL *op)
#endif
WT_PUBLISH(async->async_queue[my_slot], op);
op->state = WT_ASYNCOP_ENQUEUED;
- if (WT_ATOMIC_ADD4(async->cur_queue, 1) > async->max_queue)
+ if (__wt_atomic_add32(&async->cur_queue, 1) > async->max_queue)
WT_PUBLISH(async->max_queue, async->cur_queue);
/*
* Multiple threads may be adding ops to the queue. We need to wait
diff --git a/src/async/async_worker.c b/src/async/async_worker.c
index 473e7103832..6a5ec5feeb0 100644
--- a/src/async/async_worker.c
+++ b/src/async/async_worker.c
@@ -67,7 +67,7 @@ retry:
* a race, try again.
*/
my_consume = last_consume + 1;
- if (!WT_ATOMIC_CAS8(async->alloc_tail, last_consume, my_consume))
+ if (!__wt_atomic_cas64(&async->alloc_tail, last_consume, my_consume))
goto retry;
/*
* This item of work is ours to process. Clear it out of the
@@ -81,7 +81,7 @@ retry:
WT_ASSERT(session, async->cur_queue > 0);
WT_ASSERT(session, *op != NULL);
WT_ASSERT(session, (*op)->state == WT_ASYNCOP_ENQUEUED);
- (void)WT_ATOMIC_SUB4(async->cur_queue, 1);
+ (void)__wt_atomic_sub32(&async->cur_queue, 1);
(*op)->state = WT_ASYNCOP_WORKING;
if (*op == &async->flush_op)
@@ -316,7 +316,7 @@ __wt_async_worker(void *arg)
* the queue.
*/
WT_ORDERED_READ(flush_gen, async->flush_gen);
- if (WT_ATOMIC_ADD4(async->flush_count, 1) ==
+ if (__wt_atomic_add32(&async->flush_count, 1) ==
conn->async_workers) {
/*
* We're last. All workers accounted for so
diff --git a/src/block/block_open.c b/src/block/block_open.c
index e1b7894aee5..9223c1ad8e4 100644
--- a/src/block/block_open.c
+++ b/src/block/block_open.c
@@ -159,9 +159,9 @@ __wt_block_configure_first_fit(WT_BLOCK *block, int on)
* as long as any operation wants it.
*/
if (on)
- (void)WT_ATOMIC_ADD4(block->allocfirst, 1);
+ (void)__wt_atomic_add32(&block->allocfirst, 1);
else
- (void)WT_ATOMIC_SUB4(block->allocfirst, 1);
+ (void)__wt_atomic_sub32(&block->allocfirst, 1);
}
/*
diff --git a/src/btree/bt_delete.c b/src/btree/bt_delete.c
index 8cca6328f21..cddfa0ef801 100644
--- a/src/btree/bt_delete.c
+++ b/src/btree/bt_delete.c
@@ -70,15 +70,15 @@ __wt_delete_page(WT_SESSION_IMPL *session, WT_REF *ref, int *skipp)
/* If we have a clean page in memory, attempt to evict it. */
if (ref->state == WT_REF_MEM &&
- WT_ATOMIC_CAS4(ref->state, WT_REF_MEM, WT_REF_LOCKED)) {
+ __wt_atomic_casv32(&ref->state, WT_REF_MEM, WT_REF_LOCKED)) {
if (__wt_page_is_modified(ref->page)) {
WT_PUBLISH(ref->state, WT_REF_MEM);
return (0);
}
- (void)WT_ATOMIC_ADD4(S2BT(session)->evict_busy, 1);
+ (void)__wt_atomic_addv32(&S2BT(session)->evict_busy, 1);
ret = __wt_evict_page(session, ref);
- (void)WT_ATOMIC_SUB4(S2BT(session)->evict_busy, 1);
+ (void)__wt_atomic_subv32(&S2BT(session)->evict_busy, 1);
WT_RET_BUSY_OK(ret);
}
@@ -93,7 +93,7 @@ __wt_delete_page(WT_SESSION_IMPL *session, WT_REF *ref, int *skipp)
* unclear optimizing for overlapping range deletes is worth the effort.
*/
if (ref->state != WT_REF_DISK ||
- !WT_ATOMIC_CAS4(ref->state, WT_REF_DISK, WT_REF_LOCKED))
+ !__wt_atomic_casv32(&ref->state, WT_REF_DISK, WT_REF_LOCKED))
return (0);
/*
@@ -176,8 +176,8 @@ __wt_delete_page_rollback(WT_SESSION_IMPL *session, WT_REF *ref)
* If the page is still "deleted", it's as we left it,
* reset the state.
*/
- if (WT_ATOMIC_CAS4(
- ref->state, WT_REF_DELETED, WT_REF_DISK))
+ if (__wt_atomic_casv32(
+ &ref->state, WT_REF_DELETED, WT_REF_DISK))
return;
break;
case WT_REF_LOCKED:
@@ -242,7 +242,7 @@ __wt_delete_page_skip(WT_SESSION_IMPL *session, WT_REF *ref)
if (ref->page_del == NULL)
return (1);
- if (!WT_ATOMIC_CAS4(ref->state, WT_REF_DELETED, WT_REF_LOCKED))
+ if (!__wt_atomic_casv32(&ref->state, WT_REF_DELETED, WT_REF_LOCKED))
return (0);
skip = (ref->page_del == NULL ||
diff --git a/src/btree/bt_page.c b/src/btree/bt_page.c
index b8b67720fce..a38c96c77a4 100644
--- a/src/btree/bt_page.c
+++ b/src/btree/bt_page.c
@@ -295,8 +295,8 @@ err: if ((pindex = WT_INTL_INDEX_GET_SAFE(page)) != NULL) {
/* Increment the cache statistics. */
__wt_cache_page_inmem_incr(session, page, size);
- (void)WT_ATOMIC_ADD8(cache->bytes_read, size);
- (void)WT_ATOMIC_ADD8(cache->pages_inmem, 1);
+ (void)__wt_atomic_add64(&cache->bytes_read, size);
+ (void)__wt_atomic_add64(&cache->pages_inmem, 1);
*pagep = page;
return (0);
diff --git a/src/btree/bt_read.c b/src/btree/bt_read.c
index e27f7c3398c..a3ce39b7758 100644
--- a/src/btree/bt_read.c
+++ b/src/btree/bt_read.c
@@ -18,8 +18,8 @@ __wt_cache_read(WT_SESSION_IMPL *session, WT_REF *ref)
WT_DECL_RET;
WT_ITEM tmp;
WT_PAGE *page;
- WT_PAGE_STATE previous_state;
size_t addr_size;
+ uint32_t previous_state;
const uint8_t *addr;
page = NULL;
@@ -35,9 +35,9 @@ __wt_cache_read(WT_SESSION_IMPL *session, WT_REF *ref)
* WT_REF_LOCKED, for deleted pages. If successful, we've won the
* race, read the page.
*/
- if (WT_ATOMIC_CAS4(ref->state, WT_REF_DISK, WT_REF_READING))
+ if (__wt_atomic_casv32(&ref->state, WT_REF_DISK, WT_REF_READING))
previous_state = WT_REF_DISK;
- else if (WT_ATOMIC_CAS4(ref->state, WT_REF_DELETED, WT_REF_LOCKED))
+ else if (__wt_atomic_casv32(&ref->state, WT_REF_DELETED, WT_REF_LOCKED))
previous_state = WT_REF_DELETED;
else
return (0);
diff --git a/src/btree/bt_split.c b/src/btree/bt_split.c
index acef71f1d94..2b20b2118ce 100644
--- a/src/btree/bt_split.c
+++ b/src/btree/bt_split.c
@@ -520,7 +520,7 @@ __split_deepen(WT_SESSION_IMPL *session, WT_PAGE *parent, uint32_t children)
*/
WT_ASSERT(session, WT_INTL_INDEX_GET_SAFE(parent) == pindex);
WT_INTL_INDEX_SET(parent, alloc_index);
- split_gen = WT_ATOMIC_ADD8(S2C(session)->split_gen, 1);
+ split_gen = __wt_atomic_addv64(&S2C(session)->split_gen, 1);
panic = 1;
#ifdef HAVE_DIAGNOSTIC
@@ -862,8 +862,8 @@ __split_parent(WT_SESSION_IMPL *session, WT_REF *ref,
WT_ASSERT(session, next_ref->state != WT_REF_SPLIT);
if (next_ref->state == WT_REF_DELETED &&
__wt_delete_page_skip(session, next_ref) &&
- WT_ATOMIC_CAS4(next_ref->state,
- WT_REF_DELETED, WT_REF_SPLIT))
+ __wt_atomic_casv32(
+ &next_ref->state, WT_REF_DELETED, WT_REF_SPLIT))
deleted_entries++;
}
@@ -908,7 +908,7 @@ __split_parent(WT_SESSION_IMPL *session, WT_REF *ref,
*/
WT_ASSERT(session, WT_INTL_INDEX_GET_SAFE(parent) == pindex);
WT_INTL_INDEX_SET(parent, alloc_index);
- split_gen = WT_ATOMIC_ADD8(S2C(session)->split_gen, 1);
+ split_gen = __wt_atomic_addv64(&S2C(session)->split_gen, 1);
alloc_index = NULL;
#ifdef HAVE_DIAGNOSTIC
diff --git a/src/btree/row_key.c b/src/btree/row_key.c
index f2868afe13a..4affa7fa62a 100644
--- a/src/btree/row_key.c
+++ b/src/btree/row_key.c
@@ -448,7 +448,8 @@ next: switch (direction) {
* update the page's memory footprint, on failure, free
* the allocated memory.
*/
- if (WT_ATOMIC_CAS8(WT_ROW_KEY_COPY(rip), copy, ikey))
+ if (__wt_atomic_cas_ptr(
+ (void *)&WT_ROW_KEY_COPY(rip), copy, ikey))
__wt_cache_page_inmem_incr(session,
page, sizeof(WT_IKEY) + ikey->size);
else
@@ -525,7 +526,7 @@ __wt_row_ikey(WT_SESSION_IMPL *session,
WT_ASSERT(session, oldv == 0 || (oldv & WT_IK_FLAG) != 0);
WT_ASSERT(session, ref->state != WT_REF_SPLIT);
WT_ASSERT(session,
- WT_ATOMIC_CAS8(ref->key.ikey, (WT_IKEY *)oldv, ikey));
+ __wt_atomic_cas_ptr(&ref->key.ikey, (WT_IKEY *)oldv, ikey));
}
#else
ref->key.ikey = ikey;
diff --git a/src/btree/row_modify.c b/src/btree/row_modify.c
index 2dd42de5900..6a9ad1c78b9 100644
--- a/src/btree/row_modify.c
+++ b/src/btree/row_modify.c
@@ -34,7 +34,7 @@ __wt_page_modify_alloc(WT_SESSION_IMPL *session, WT_PAGE *page)
* footprint, else discard the modify structure, another thread did the
* work.
*/
- if (WT_ATOMIC_CAS8(page->modify, NULL, modify))
+ if (__wt_atomic_cas_ptr(&page->modify, NULL, modify))
__wt_cache_page_inmem_incr(session, page, sizeof(*modify));
else
__wt_free(session, modify);
@@ -318,7 +318,7 @@ __wt_update_obsolete_check(
*/
if (first != NULL &&
(next = first->next) != NULL &&
- WT_ATOMIC_CAS8(first->next, next, NULL))
+ __wt_atomic_cas_ptr(&first->next, next, NULL))
return (next);
/*
diff --git a/src/evict/evict_lru.c b/src/evict/evict_lru.c
index 4cb29d4e1b9..00aeaf02437 100644
--- a/src/evict/evict_lru.c
+++ b/src/evict/evict_lru.c
@@ -981,7 +981,8 @@ retry: while (slot < max_entries && ret == 0) {
} else {
if (incr) {
WT_ASSERT(session, dhandle->session_inuse > 0);
- (void)WT_ATOMIC_SUB4(dhandle->session_inuse, 1);
+ (void)__wt_atomic_subi32(
+ &dhandle->session_inuse, 1);
incr = 0;
}
dhandle = TAILQ_NEXT(dhandle, q);
@@ -1025,7 +1026,7 @@ retry: while (slot < max_entries && ret == 0) {
btree->evict_walk_skips = 0;
prev_slot = slot;
- (void)WT_ATOMIC_ADD4(dhandle->session_inuse, 1);
+ (void)__wt_atomic_addi32(&dhandle->session_inuse, 1);
incr = 1;
__wt_spin_unlock(session, &conn->dhandle_lock);
dhandle_locked = 0;
@@ -1060,7 +1061,7 @@ retry: while (slot < max_entries && ret == 0) {
cache->evict_file_next = dhandle;
WT_ASSERT(session, dhandle->session_inuse > 0);
- (void)WT_ATOMIC_SUB4(dhandle->session_inuse, 1);
+ (void)__wt_atomic_subi32(&dhandle->session_inuse, 1);
incr = 0;
}
@@ -1331,8 +1332,8 @@ __evict_get_ref(
* multiple attempts to evict it. For pages that are already
* being evicted, this operation will fail and we will move on.
*/
- if (!WT_ATOMIC_CAS4(
- evict->ref->state, WT_REF_MEM, WT_REF_LOCKED)) {
+ if (!__wt_atomic_casv32(
+ &evict->ref->state, WT_REF_MEM, WT_REF_LOCKED)) {
__evict_list_clear(session, evict);
continue;
}
@@ -1341,7 +1342,7 @@ __evict_get_ref(
* Increment the busy count in the btree handle to prevent it
* from being closed under us.
*/
- (void)WT_ATOMIC_ADD4(evict->btree->evict_busy, 1);
+ (void)__wt_atomic_addv32(&evict->btree->evict_busy, 1);
*btreep = evict->btree;
*refp = evict->ref;
@@ -1420,7 +1421,7 @@ __wt_evict_lru_page(WT_SESSION_IMPL *session, int is_server)
WT_WITH_BTREE(session, btree, ret = __wt_evict_page(session, ref));
- (void)WT_ATOMIC_SUB4(btree->evict_busy, 1);
+ (void)__wt_atomic_subv32(&btree->evict_busy, 1);
WT_RET(ret);
diff --git a/src/include/async.h b/src/include/async.h
index c8d9fa5aa91..fb9a64e774d 100644
--- a/src/include/async.h
+++ b/src/include/async.h
@@ -6,20 +6,6 @@
* See the file LICENSE for redistribution information.
*/
-typedef enum {
- WT_ASYNCOP_ENQUEUED, /* Placed on the work queue */
- WT_ASYNCOP_FREE, /* Able to be allocated to user */
- WT_ASYNCOP_READY, /* Allocated and ready for user to use */
- WT_ASYNCOP_WORKING /* Operation in progress by worker */
-} WT_ASYNC_STATE;
-
-typedef enum {
- WT_ASYNC_FLUSH_NONE=0, /* No flush in progress */
- WT_ASYNC_FLUSH_COMPLETE, /* Notify flush caller it's done */
- WT_ASYNC_FLUSH_IN_PROGRESS, /* Prevent other callers */
- WT_ASYNC_FLUSHING /* Notify workers */
-} WT_ASYNC_FLUSH_STATE;
-
#define MAX_ASYNC_SLEEP_USECS 100000 /* Maximum sleep waiting for work */
#define MAX_ASYNC_YIELD 200 /* Maximum number of yields for work */
@@ -53,7 +39,13 @@ struct __wt_async_op_impl {
uint64_t unique_id; /* Unique identifier. */
WT_ASYNC_FORMAT *format; /* Format structure */
- WT_ASYNC_STATE state; /* Op state */
+
+#define WT_ASYNCOP_ENQUEUED 0 /* Placed on the work queue */
+#define WT_ASYNCOP_FREE 1 /* Able to be allocated to user */
+#define WT_ASYNCOP_READY 2 /* Allocated, ready for user to use */
+#define WT_ASYNCOP_WORKING 3 /* Operation in progress by worker */
+ uint32_t state;
+
WT_ASYNC_OPTYPE optype; /* Operation type */
};
@@ -89,9 +81,15 @@ struct __wt_async {
uint64_t tail_slot; /* Worker slot consumed */
TAILQ_HEAD(__wt_async_format_qh, __wt_async_format) formatqh;
- int cur_queue; /* Currently enqueued */
- int max_queue; /* Maximum enqueued */
- WT_ASYNC_FLUSH_STATE flush_state; /* Queue flush state */
+ uint32_t cur_queue; /* Currently enqueued */
+ uint32_t max_queue; /* Maximum enqueued */
+
+#define WT_ASYNC_FLUSH_NONE 0 /* No flush in progress */
+#define WT_ASYNC_FLUSH_COMPLETE 1 /* Notify flush caller done */
+#define WT_ASYNC_FLUSH_IN_PROGRESS 2 /* Prevent other callers */
+#define WT_ASYNC_FLUSHING 3 /* Notify workers */
+ uint32_t flush_state;
+
/* Notify any waiting threads when flushing is done. */
WT_CONDVAR *flush_cond;
WT_ASYNC_OP_IMPL flush_op; /* Special flush op */
diff --git a/src/include/btmem.h b/src/include/btmem.h
index 1c04af1aef3..2933add6201 100644
--- a/src/include/btmem.h
+++ b/src/include/btmem.h
@@ -648,14 +648,6 @@ struct __wt_page {
* to the readers. If the evicting thread does not find a hazard pointer,
* the page is evicted.
*/
-typedef enum __wt_page_state {
- WT_REF_DISK=0, /* Page is on disk */
- WT_REF_DELETED, /* Page is on disk, but deleted */
- WT_REF_LOCKED, /* Page locked for exclusive access */
- WT_REF_MEM, /* Page is in cache and valid */
- WT_REF_READING, /* Page being read */
- WT_REF_SPLIT /* Page was split */
-} WT_PAGE_STATE;
/*
* WT_PAGE_DELETED --
@@ -683,7 +675,13 @@ struct __wt_ref {
WT_PAGE * volatile home; /* Reference page */
uint32_t pindex_hint; /* Reference page index hint */
- volatile WT_PAGE_STATE state; /* Page state */
+#define WT_REF_DISK 0 /* Page is on disk */
+#define WT_REF_DELETED 1 /* Page is on disk, but deleted */
+#define WT_REF_LOCKED 2 /* Page locked for exclusive access */
+#define WT_REF_MEM 3 /* Page is in cache and valid */
+#define WT_REF_READING 4 /* Page being read */
+#define WT_REF_SPLIT 5 /* Parent page split (WT_REF dead) */
+ volatile uint32_t state; /* Page state */
/*
* Address: on-page cell if read from backing block, off-page WT_ADDR
@@ -950,7 +948,7 @@ struct __wt_insert {
#define WT_PAGE_ALLOC_AND_SWAP(s, page, dest, v, count) do { \
if (((v) = (dest)) == NULL) { \
WT_ERR(__wt_calloc_def(s, count, &(v))); \
- if (WT_ATOMIC_CAS8(dest, NULL, v)) \
+ if (__wt_atomic_cas_ptr(&dest, NULL, v)) \
__wt_cache_page_inmem_incr( \
s, page, (count) * sizeof(*(v))); \
else \
diff --git a/src/include/btree.i b/src/include/btree.i
index 23cb54a4179..03de57d4d21 100644
--- a/src/include/btree.i
+++ b/src/include/btree.i
@@ -49,17 +49,17 @@ __wt_cache_page_inmem_incr(WT_SESSION_IMPL *session, WT_PAGE *page, size_t size)
WT_ASSERT(session, size < WT_EXABYTE);
cache = S2C(session)->cache;
- (void)WT_ATOMIC_ADD8(cache->bytes_inmem, size);
- (void)WT_ATOMIC_ADD8(page->memory_footprint, size);
+ (void)__wt_atomic_add64(&cache->bytes_inmem, size);
+ (void)__wt_atomic_add64(&page->memory_footprint, size);
if (__wt_page_is_modified(page)) {
- (void)WT_ATOMIC_ADD8(cache->bytes_dirty, size);
- (void)WT_ATOMIC_ADD8(page->modify->bytes_dirty, size);
+ (void)__wt_atomic_add64(&cache->bytes_dirty, size);
+ (void)__wt_atomic_add64(&page->modify->bytes_dirty, size);
}
/* Track internal and overflow size in cache. */
if (WT_PAGE_IS_INTERNAL(page))
- (void)WT_ATOMIC_ADD8(cache->bytes_internal, size);
+ (void)__wt_atomic_add64(&cache->bytes_internal, size);
else if (page->type == WT_PAGE_OVFL)
- (void)WT_ATOMIC_ADD8(cache->bytes_overflow, size);
+ (void)__wt_atomic_add64(&cache->bytes_overflow, size);
}
/*
@@ -73,8 +73,8 @@ __wt_cache_page_inmem_incr(WT_SESSION_IMPL *session, WT_PAGE *page, size_t size)
#ifdef HAVE_DIAGNOSTIC
#define WT_CACHE_DECR(session, f, sz) do { \
static int __first = 1; \
- if (WT_ATOMIC_SUB8(f, sz) > WT_EXABYTE) { \
- (void)WT_ATOMIC_ADD8(f, sz); \
+ if (__wt_atomic_sub64(&f, sz) > WT_EXABYTE) { \
+ (void)__wt_atomic_add64(&f, sz); \
if (__first) { \
__wt_errx(session, \
"%s underflow: decrementing %" WT_SIZET_FMT,\
@@ -85,8 +85,8 @@ __wt_cache_page_inmem_incr(WT_SESSION_IMPL *session, WT_PAGE *page, size_t size)
} while (0)
#else
#define WT_CACHE_DECR(s, f, sz) do { \
- if (WT_ATOMIC_SUB8(f, sz) > WT_EXABYTE) \
- (void)WT_ATOMIC_ADD8(f, sz); \
+ if (__wt_atomic_sub64(&f, sz) > WT_EXABYTE) \
+ (void)__wt_atomic_add64(&f, sz); \
} while (0)
#endif
@@ -128,8 +128,8 @@ __wt_cache_page_byte_dirty_decr(
*/
orig = page->modify->bytes_dirty;
decr = WT_MIN(size, orig);
- if (WT_ATOMIC_CAS8(
- page->modify->bytes_dirty, orig, orig - decr)) {
+ if (__wt_atomic_cas64(
+ &page->modify->bytes_dirty, orig, orig - decr)) {
WT_CACHE_DECR(session, cache->bytes_dirty, decr);
break;
}
@@ -172,15 +172,15 @@ __wt_cache_dirty_incr(WT_SESSION_IMPL *session, WT_PAGE *page)
size_t size;
cache = S2C(session)->cache;
- (void)WT_ATOMIC_ADD8(cache->pages_dirty, 1);
+ (void)__wt_atomic_add64(&cache->pages_dirty, 1);
/*
* Take care to read the memory_footprint once in case we are racing
* with updates.
*/
size = page->memory_footprint;
- (void)WT_ATOMIC_ADD8(cache->bytes_dirty, size);
- (void)WT_ATOMIC_ADD8(page->modify->bytes_dirty, size);
+ (void)__wt_atomic_add64(&cache->bytes_dirty, size);
+ (void)__wt_atomic_add64(&page->modify->bytes_dirty, size);
}
/*
@@ -202,7 +202,7 @@ __wt_cache_dirty_decr(WT_SESSION_IMPL *session, WT_PAGE *page)
"count went negative");
cache->pages_dirty = 0;
} else
- (void)WT_ATOMIC_SUB8(cache->pages_dirty, 1);
+ (void)__wt_atomic_sub64(&cache->pages_dirty, 1);
modify = page->modify;
if (modify != NULL && modify->bytes_dirty != 0)
@@ -239,8 +239,8 @@ __wt_cache_page_evict(WT_SESSION_IMPL *session, WT_PAGE *page)
}
/* Update pages and bytes evicted. */
- (void)WT_ATOMIC_ADD8(cache->bytes_evict, page->memory_footprint);
- (void)WT_ATOMIC_ADD8(cache->pages_evict, 1);
+ (void)__wt_atomic_add64(&cache->bytes_evict, page->memory_footprint);
+ (void)__wt_atomic_add64(&cache->pages_evict, 1);
}
/*
@@ -301,7 +301,7 @@ __wt_page_only_modify_set(WT_SESSION_IMPL *session, WT_PAGE *page)
* Every time the page transitions from clean to dirty, update the cache
* and transactional information.
*/
- if (WT_ATOMIC_ADD4(page->modify->write_gen, 1) == 1) {
+ if (__wt_atomic_add32(&page->modify->write_gen, 1) == 1) {
__wt_cache_dirty_incr(session, page);
/*
@@ -993,14 +993,14 @@ __wt_page_release_evict(WT_SESSION_IMPL *session, WT_REF *ref)
* reference without first locking the page, it could be evicted in
* between.
*/
- locked = WT_ATOMIC_CAS4(ref->state, WT_REF_MEM, WT_REF_LOCKED);
+ locked = __wt_atomic_casv32(&ref->state, WT_REF_MEM, WT_REF_LOCKED);
if ((ret = __wt_hazard_clear(session, page)) != 0 || !locked) {
if (locked)
ref->state = WT_REF_MEM;
return (ret == 0 ? EBUSY : ret);
}
- (void)WT_ATOMIC_ADD4(btree->evict_busy, 1);
+ (void)__wt_atomic_addv32(&btree->evict_busy, 1);
if ((ret = __wt_evict_page(session, ref)) == 0) {
if (too_big)
WT_STAT_FAST_CONN_INCR(session, cache_eviction_force);
@@ -1015,7 +1015,7 @@ __wt_page_release_evict(WT_SESSION_IMPL *session, WT_REF *ref)
} else
WT_STAT_FAST_CONN_INCR(session, cache_eviction_force_fail);
- (void)WT_ATOMIC_SUB4(btree->evict_busy, 1);
+ (void)__wt_atomic_subv32(&btree->evict_busy, 1);
return (ret);
}
diff --git a/src/include/cursor.i b/src/include/cursor.i
index 47b772377c0..97739c83b8f 100644
--- a/src/include/cursor.i
+++ b/src/include/cursor.i
@@ -150,7 +150,7 @@ __wt_cursor_dhandle_incr_use(WT_SESSION_IMPL *session)
dhandle = session->dhandle;
/* If we open a handle with a time of death set, clear it. */
- if (WT_ATOMIC_ADD4(dhandle->session_inuse, 1) == 1 &&
+ if (__wt_atomic_addi32(&dhandle->session_inuse, 1) == 1 &&
dhandle->timeofdeath != 0)
dhandle->timeofdeath = 0;
}
@@ -168,7 +168,7 @@ __wt_cursor_dhandle_decr_use(WT_SESSION_IMPL *session)
/* If we close a handle with a time of death set, clear it. */
WT_ASSERT(session, dhandle->session_inuse > 0);
- if (WT_ATOMIC_SUB4(dhandle->session_inuse, 1) == 0 &&
+ if (__wt_atomic_subi32(&dhandle->session_inuse, 1) == 0 &&
dhandle->timeofdeath != 0)
dhandle->timeofdeath = 0;
}
diff --git a/src/include/gcc.h b/src/include/gcc.h
index 7135bd479c7..99c53d3d96d 100644
--- a/src/include/gcc.h
+++ b/src/include/gcc.h
@@ -85,63 +85,70 @@
* In summary, locking > barriers > volatile.
*
* To avoid locking shared data structures such as statistics and to permit
- * atomic state changes, we rely on the WT_ATOMIC_ADD and WT_ATOMIC_CAS
- * (compare and swap) operations.
+ * atomic state changes, we rely on the atomic-add and atomic-cas (compare and
+ * swap) operations.
*/
-#define __WT_ATOMIC_ADD(v, val, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), __sync_add_and_fetch(&(v), val))
-#define __WT_ATOMIC_FETCH_ADD(v, val, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), __sync_fetch_and_add(&(v), val))
+
#ifdef __clang__
/*
- * We avoid __sync_bool_compare_and_swap with due to problems with
- * optimization with some versions of clang. See
- * http://llvm.org/bugs/show_bug.cgi?id=21499 for details.
+ * We avoid __sync_bool_compare_and_swap due to problems with optimization
+ * with some versions of clang. See http://llvm.org/bugs/show_bug.cgi?id=21499
+ * for details.
*/
-#define __WT_ATOMIC_CAS(v, old, new, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- __sync_val_compare_and_swap(&(v), old, new) == (old))
+#define WT_ATOMIC_CAS(ptr, oldval, newval) \
+ (__sync_val_compare_and_swap(ptr, oldval, newval) == oldval)
#else
-#define __WT_ATOMIC_CAS(v, old, new, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- __sync_bool_compare_and_swap(&(v), old, new))
+#define WT_ATOMIC_CAS(ptr, oldval, newval) \
+ __sync_bool_compare_and_swap(ptr, oldval, newval)
#endif
-#define __WT_ATOMIC_CAS_VAL(v, old, new, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- __sync_val_compare_and_swap(&(v), old, new))
-#define __WT_ATOMIC_STORE(v, val, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- __sync_lock_test_and_set(&(v), val))
-#define __WT_ATOMIC_SUB(v, val, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), __sync_sub_and_fetch(&(v), val))
-
-#define WT_ATOMIC_ADD1(v, val) __WT_ATOMIC_ADD(v, val, 1)
-#define WT_ATOMIC_FETCH_ADD1(v, val) __WT_ATOMIC_FETCH_ADD(v, val, 1)
-#define WT_ATOMIC_CAS1(v, old, new) __WT_ATOMIC_CAS(v, old, new, 1)
-#define WT_ATOMIC_CAS_VAL1(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new, 1)
-#define WT_ATOMIC_STORE1(v, val) __WT_ATOMIC_STORE(v, val, 1)
-#define WT_ATOMIC_SUB1(v, val) __WT_ATOMIC_SUB(v, val, 1)
-#define WT_ATOMIC_ADD2(v, val) __WT_ATOMIC_ADD(v, val, 2)
-#define WT_ATOMIC_FETCH_ADD2(v, val) __WT_ATOMIC_FETCH_ADD(v, val, 2)
-#define WT_ATOMIC_CAS2(v, old, new) __WT_ATOMIC_CAS(v, old, new, 2)
-#define WT_ATOMIC_CAS_VAL2(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new, 2)
-#define WT_ATOMIC_STORE2(v, val) __WT_ATOMIC_STORE(v, val, 2)
-#define WT_ATOMIC_SUB2(v, val) __WT_ATOMIC_SUB(v, val, 2)
+#define WT_ATOMIC_FUNC(name, ret, type) \
+static inline ret \
+__wt_atomic_add##name(type *vp, type v) \
+{ \
+ return (__sync_add_and_fetch(vp, v)); \
+} \
+static inline ret \
+__wt_atomic_fetch_add##name(type *vp, type v) \
+{ \
+ return (__sync_fetch_and_add(vp, v)); \
+} \
+static inline ret \
+__wt_atomic_store##name(type *vp, type v) \
+{ \
+ return (__sync_lock_test_and_set(vp, v)); \
+} \
+static inline ret \
+__wt_atomic_sub##name(type *vp, type v) \
+{ \
+ return (__sync_sub_and_fetch(vp, v)); \
+} \
+static inline int \
+__wt_atomic_cas##name(type *vp, type old, type new) \
+{ \
+ return (WT_ATOMIC_CAS(vp, old, new)); \
+}
-#define WT_ATOMIC_ADD4(v, val) __WT_ATOMIC_ADD(v, val, 4)
-#define WT_ATOMIC_FETCH_ADD4(v, val) __WT_ATOMIC_FETCH_ADD(v, val, 4)
-#define WT_ATOMIC_CAS4(v, old, new) __WT_ATOMIC_CAS(v, old, new, 4)
-#define WT_ATOMIC_CAS_VAL4(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new, 4)
-#define WT_ATOMIC_STORE4(v, val) __WT_ATOMIC_STORE(v, val, 4)
-#define WT_ATOMIC_SUB4(v, val) __WT_ATOMIC_SUB(v, val, 4)
+WT_ATOMIC_FUNC(8, uint8_t, uint8_t)
+WT_ATOMIC_FUNC(16, uint16_t, uint16_t)
+WT_ATOMIC_FUNC(32, uint32_t, uint32_t)
+WT_ATOMIC_FUNC(v32, uint32_t, volatile uint32_t)
+WT_ATOMIC_FUNC(i32, int32_t, int32_t)
+WT_ATOMIC_FUNC(iv32, int32_t, volatile int32_t)
+WT_ATOMIC_FUNC(64, uint64_t, uint64_t)
+WT_ATOMIC_FUNC(v64, uint64_t, volatile uint64_t)
+WT_ATOMIC_FUNC(i64, int64_t, int64_t)
+WT_ATOMIC_FUNC(iv64, int64_t, volatile int64_t)
-#define WT_ATOMIC_ADD8(v, val) __WT_ATOMIC_ADD(v, val, 8)
-#define WT_ATOMIC_FETCH_ADD8(v, val) __WT_ATOMIC_FETCH_ADD(v, val, 8)
-#define WT_ATOMIC_CAS8(v, old, new) __WT_ATOMIC_CAS(v, old, new, 8)
-#define WT_ATOMIC_CAS_VAL8(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new, 8)
-#define WT_ATOMIC_STORE8(v, val) __WT_ATOMIC_STORE(v, val, 8)
-#define WT_ATOMIC_SUB8(v, val) __WT_ATOMIC_SUB(v, val, 8)
+/*
+ * __wt_atomic_cas_ptr --
+ * Pointer compare and swap.
+ */
+static inline int
+__wt_atomic_cas_ptr(void *vp, void *old, void *new)
+{
+ return (WT_ATOMIC_CAS((void **)vp, old, new));
+}
/* Compile read-write barrier */
#define WT_BARRIER() __asm__ volatile("" ::: "memory")
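(Illustration, not part of the patch: each WT_ATOMIC_FUNC(name, ret, type) invocation above stamps out a family of typed inline functions over the GCC __sync builtins. Hand-expanding WT_ATOMIC_FUNC(32, uint32_t, uint32_t) gives roughly the following, so __wt_atomic_add32 returns the new value while __wt_atomic_fetch_add32 returns the old one.)

    static inline uint32_t
    __wt_atomic_add32(uint32_t *vp, uint32_t v)
    {
    	return (__sync_add_and_fetch(vp, v));	/* Returns the new value. */
    }
    static inline uint32_t
    __wt_atomic_fetch_add32(uint32_t *vp, uint32_t v)
    {
    	return (__sync_fetch_and_add(vp, v));	/* Returns the old value. */
    }
    static inline int
    __wt_atomic_cas32(uint32_t *vp, uint32_t old, uint32_t new)
    {
    	return (WT_ATOMIC_CAS(vp, old, new));	/* Nonzero if the swap happened. */
    }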
diff --git a/src/include/hardware.h b/src/include/hardware.h
index 1c3fb287e86..bb909df657d 100644
--- a/src/include/hardware.h
+++ b/src/include/hardware.h
@@ -33,8 +33,8 @@
uint8_t __orig; \
do { \
__orig = (p)->flags_atomic; \
- } while (!WT_ATOMIC_CAS1((p)->flags_atomic, \
- __orig, __orig | (uint8_t)(mask))); \
+ } while (!__wt_atomic_cas8( \
+ &(p)->flags_atomic, __orig, __orig | (uint8_t)(mask))); \
} while (0)
#define F_CAS_ATOMIC(p, mask, ret) do { \
@@ -46,16 +46,16 @@
ret = EBUSY; \
break; \
} \
- } while (!WT_ATOMIC_CAS1((p)->flags_atomic, \
- __orig, __orig | (uint8_t)(mask))); \
+ } while (!__wt_atomic_cas8( \
+ &(p)->flags_atomic, __orig, __orig | (uint8_t)(mask))); \
} while (0)
#define F_CLR_ATOMIC(p, mask) do { \
uint8_t __orig; \
do { \
__orig = (p)->flags_atomic; \
- } while (!WT_ATOMIC_CAS1((p)->flags_atomic, \
- __orig, __orig & ~(uint8_t)(mask))); \
+ } while (!__wt_atomic_cas8( \
+ &(p)->flags_atomic, __orig, __orig & ~(uint8_t)(mask))); \
} while (0)
#define WT_CACHE_LINE_ALIGNMENT 64 /* Cache line alignment */
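(Illustration, not part of the patch: the F_SET_ATOMIC/F_CLR_ATOMIC macros above are the usual compare-and-swap retry loop -- snapshot the flags byte, compute the desired value, and retry until no other writer slips in between the read and the CAS. A hypothetical function form of the "set" case, assuming a plain uint8_t flags field:)

    static inline void
    example_flags_set_atomic(uint8_t *flagsp, uint8_t mask)
    {
    	uint8_t orig;

    	do {
    		orig = *flagsp;		/* Snapshot the current flags. */
    	} while (!__wt_atomic_cas8(flagsp, orig, orig | mask));
    }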
diff --git a/src/include/lint.h b/src/include/lint.h
index 631f00cb5cd..dae5918380a 100644
--- a/src/include/lint.h
+++ b/src/include/lint.h
@@ -18,46 +18,70 @@
#define WT_GCC_FUNC_ATTRIBUTE(x)
#define WT_GCC_FUNC_DECL_ATTRIBUTE(x)
-#define __WT_ATOMIC_ADD(v, val) \
- ((v) += (val))
-#define __WT_ATOMIC_FETCH_ADD(v, val) \
- ((v) += (val), (v))
-#define __WT_ATOMIC_CAS(v, old, new) \
- ((v) = ((v) == (old) ? (new) : (old)), (v) == (old))
-#define __WT_ATOMIC_CAS_VAL(v, old, new) \
- ((v) = ((v) == (old) ? (new) : (old)), (v) == (old))
-#define __WT_ATOMIC_STORE(v, val) \
- ((v) = (val))
-#define __WT_ATOMIC_SUB(v, val) \
- ((v) -= (val), (v))
+#define WT_ATOMIC_FUNC(name, ret, type) \
+static inline ret \
+__wt_atomic_add##name(type *vp, type v) \
+{ \
+ *vp += v; \
+ return (*vp); \
+} \
+static inline ret \
+__wt_atomic_fetch_add##name(type *vp, type v) \
+{ \
+ type orig; \
+ \
+ orig = *vp; \
+ *vp += v; \
+ return (orig); \
+} \
+static inline ret \
+__wt_atomic_store##name(type *vp, type v) \
+{ \
+ type orig; \
+ \
+ orig = *vp; \
+ *vp = v; \
+ return (orig); \
+} \
+static inline ret \
+__wt_atomic_sub##name(type *vp, type v) \
+{ \
+ *vp -= v; \
+ return (*vp); \
+} \
+static inline int \
+__wt_atomic_cas##name(type *vp, type old, type new) \
+{ \
+ if (*vp == old) { \
+ *vp = new; \
+ return (1); \
+ } \
+ return (0); \
+}
-#define WT_ATOMIC_ADD1(v, val) __WT_ATOMIC_ADD(v, val)
-#define WT_ATOMIC_FETCH_ADD1(v, val) __WT_ATOMIC_FETCH_ADD(v, val)
-#define WT_ATOMIC_CAS1(v, old, new) __WT_ATOMIC_CAS(v, old, new)
-#define WT_ATOMIC_CAS_VAL1(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new)
-#define WT_ATOMIC_STORE1(v, val) __WT_ATOMIC_STORE(v, val)
-#define WT_ATOMIC_SUB1(v, val) __WT_ATOMIC_SUB(v, val)
+WT_ATOMIC_FUNC(8, uint8_t, uint8_t)
+WT_ATOMIC_FUNC(16, uint16_t, uint16_t)
+WT_ATOMIC_FUNC(32, uint32_t, uint32_t)
+WT_ATOMIC_FUNC(v32, uint32_t, volatile uint32_t)
+WT_ATOMIC_FUNC(i32, int32_t, int32_t)
+WT_ATOMIC_FUNC(iv32, int32_t, volatile int32_t)
+WT_ATOMIC_FUNC(64, uint64_t, uint64_t)
+WT_ATOMIC_FUNC(v64, uint64_t, volatile uint64_t)
+WT_ATOMIC_FUNC(i64, int64_t, int64_t)
+WT_ATOMIC_FUNC(iv64, int64_t, volatile int64_t)
-#define WT_ATOMIC_ADD2(v, val) __WT_ATOMIC_ADD(v, val)
-#define WT_ATOMIC_FETCH_ADD2(v, val) __WT_ATOMIC_FETCH_ADD(v, val)
-#define WT_ATOMIC_CAS2(v, old, new) __WT_ATOMIC_CAS(v, old, new)
-#define WT_ATOMIC_CAS_VAL2(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new)
-#define WT_ATOMIC_STORE2(v, val) __WT_ATOMIC_STORE(v, val)
-#define WT_ATOMIC_SUB2(v, val) __WT_ATOMIC_SUB(v, val)
-
-#define WT_ATOMIC_ADD4(v, val) __WT_ATOMIC_ADD(v, val)
-#define WT_ATOMIC_FETCH_ADD4(v, val) __WT_ATOMIC_FETCH_ADD(v, val)
-#define WT_ATOMIC_CAS4(v, old, new) __WT_ATOMIC_CAS(v, old, new)
-#define WT_ATOMIC_CAS_VAL4(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new)
-#define WT_ATOMIC_STORE4(v, val) __WT_ATOMIC_STORE(v, val)
-#define WT_ATOMIC_SUB4(v, val) __WT_ATOMIC_SUB(v, val)
-
-#define WT_ATOMIC_ADD8(v, val) __WT_ATOMIC_ADD(v, val)
-#define WT_ATOMIC_FETCH_ADD8(v, val) __WT_ATOMIC_FETCH_ADD(v, val)
-#define WT_ATOMIC_CAS8(v, old, new) __WT_ATOMIC_CAS(v, old, new)
-#define WT_ATOMIC_CAS_VAL8(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new)
-#define WT_ATOMIC_STORE8(v, val) __WT_ATOMIC_STORE(v, val)
-#define WT_ATOMIC_SUB8(v, val) __WT_ATOMIC_SUB(v, val)
+/*
+ * __wt_atomic_cas_ptr --
+ * Pointer compare and swap.
+ */
+static inline int
+__wt_atomic_cas_ptr(void *vp, void *old, void *new)
+{
+ if (*(void **)vp == old) {
+ *(void **)vp = new;
+ return (1);
+ }
+ return (0);
+}
static inline void WT_BARRIER(void) { return; }
static inline void WT_FULL_BARRIER(void) { return; }
diff --git a/src/include/lsm.h b/src/include/lsm.h
index dc6a0d7e027..d85d2aff812 100644
--- a/src/include/lsm.h
+++ b/src/include/lsm.h
@@ -171,11 +171,11 @@ struct __wt_lsm_tree {
const char *collator_name;
int collator_owned;
- int refcnt; /* Number of users of the tree */
- int8_t exclusive; /* Tree is locked exclusively */
+ uint32_t refcnt; /* Number of users of the tree */
+ uint8_t exclusive; /* Tree is locked exclusively */
#define LSM_TREE_MAX_QUEUE 100
- int queue_ref;
+ uint32_t queue_ref;
WT_RWLOCK *rwlock;
TAILQ_ENTRY(__wt_lsm_tree) q;
@@ -215,7 +215,7 @@ struct __wt_lsm_tree {
WT_LSM_CHUNK **old_chunks; /* Array of old LSM chunks */
size_t old_alloc; /* Space allocated for old chunks */
u_int nold_chunks; /* Number of old chunks */
- int freeing_old_chunks; /* Whether chunks are being freed */
+ uint32_t freeing_old_chunks; /* Whether chunks are being freed */
uint32_t merge_aggressiveness; /* Increase amount of work per merge */
#define WT_LSM_TREE_ACTIVE 0x01 /* Workers are active */
diff --git a/src/include/msvc.h b/src/include/msvc.h
index fa5b2d848e8..c35406c7f93 100644
--- a/src/include/msvc.h
+++ b/src/include/msvc.h
@@ -31,63 +31,55 @@
#define WT_GCC_FUNC_ATTRIBUTE(x)
#define WT_GCC_FUNC_DECL_ATTRIBUTE(x)
-#define __WT_ATOMIC_ADD(v, val, n, s, t) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- _InterlockedExchangeAdd ## s((t*)&(v), (t)(val)) + (val))
-#define __WT_ATOMIC_FETCH_ADD(v, val, n, s, t) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- _InterlockedExchangeAdd ## s((t*)&(v), (t)(val)))
-#define __WT_ATOMIC_CAS(v, old, new, n, s, t) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- _InterlockedCompareExchange ## s \
- ((t*)&(v), (t)(new), (t)(old)) == (t)(old))
-#define __WT_ATOMIC_CAS_VAL(v, old, new, n, s, t) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- _InterlockedCompareExchange ## s((t*)&(v), (t)(new), (t)(old)))
-#define __WT_ATOMIC_STORE(v, val, n, s, t) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- _InterlockedExchange ## s((t*)&(v), (t)(val)))
-#define __WT_ATOMIC_SUB(v, val, n, s, t) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- _InterlockedExchangeAdd ## s((t*)&(v), -(t) val) - (val))
+#define WT_ATOMIC_FUNC(name, ret, type, s, t) \
+static inline ret \
+__wt_atomic_add##name(type *vp, type v) \
+{ \
+ return (_InterlockedExchangeAdd ## s((t *)(vp), (t)(v)) + (v)); \
+} \
+static inline ret \
+__wt_atomic_fetch_add##name(type *vp, type v) \
+{ \
+ return (_InterlockedExchangeAdd ## s((t *)(vp), (t)(v))); \
+} \
+static inline ret \
+__wt_atomic_store##name(type *vp, type v) \
+{ \
+ return (_InterlockedExchange ## s((t *)(vp), (t)(v))); \
+} \
+static inline ret \
+__wt_atomic_sub##name(type *vp, type v) \
+{ \
+ return (_InterlockedExchangeAdd ## s((t *)(vp), - (t)v) - (v)); \
+} \
+static inline int \
+__wt_atomic_cas##name(type *vp, type old, type new) \
+{ \
+ return (_InterlockedCompareExchange ## s \
+ ((t *)(vp), (t)(new), (t)(old)) == (t)(old)); \
+}
-#define WT_ATOMIC_ADD1(v, val) __WT_ATOMIC_ADD(v, val, 1, 8, char)
-#define WT_ATOMIC_FETCH_ADD1(v, val) \
- __WT_ATOMIC_FETCH_ADD(v, val, 1, 8, char)
-#define WT_ATOMIC_CAS1(v, old, new) __WT_ATOMIC_CAS(v, old, new, 1, 8, char)
-#define WT_ATOMIC_CAS_VAL1(v, old, new) \
- __WT_ATOMIC_CAS_VAL(v, old, new, 1, 8, char)
-#define WT_ATOMIC_STORE1(v, val) __WT_ATOMIC_STORE(v, val, 1, 8, char)
-#define WT_ATOMIC_SUB1(v, val) __WT_ATOMIC_SUB(v, val, 1, 8, char)
+WT_ATOMIC_FUNC(8, uint8_t, uint8_t, 8, char)
+WT_ATOMIC_FUNC(16, uint16_t, uint16_t, 16, short)
+WT_ATOMIC_FUNC(32, uint32_t, uint32_t, , long)
+WT_ATOMIC_FUNC(v32, uint32_t, volatile uint32_t, , long)
+WT_ATOMIC_FUNC(i32, int32_t, int32_t, , long)
+WT_ATOMIC_FUNC(iv32, int32_t, volatile int32_t, , long)
+WT_ATOMIC_FUNC(64, uint64_t, uint64_t, 64, __int64)
+WT_ATOMIC_FUNC(v64, uint64_t, volatile uint64_t, 64, __int64)
+WT_ATOMIC_FUNC(i64, int64_t, int64_t, 64, __int64)
+WT_ATOMIC_FUNC(iv64, int64_t, volatile int64_t, 64, __int64)
-#define WT_ATOMIC_ADD2(v, val) __WT_ATOMIC_ADD(v, val, 2, 16, short)
-#define WT_ATOMIC_FETCH_ADD2(v, val) \
- __WT_ATOMIC_FETCH_ADD(v, val, 2, 16, short)
-#define WT_ATOMIC_CAS2(v, old, new) \
- __WT_ATOMIC_CAS(v, old, new, 2, 16, short)
-#define WT_ATOMIC_CAS_VAL2(v, old, new) \
- __WT_ATOMIC_CAS_VAL(v, old, new, 2, 16, short)
-#define WT_ATOMIC_STORE2(v, val) __WT_ATOMIC_STORE(v, val, 2, 16, short)
-#define WT_ATOMIC_SUB2(v, val) __WT_ATOMIC_SUB(v, val, 2, 16, short)
-
-#define WT_ATOMIC_ADD4(v, val) __WT_ATOMIC_ADD(v, val, 4, , long)
-#define WT_ATOMIC_FETCH_ADD4(v, val) __WT_ATOMIC_FETCH_ADD(v, val, 4, , long)
-#define WT_ATOMIC_CAS4(v, old, new) __WT_ATOMIC_CAS(v, old, new, 4, , long)
-#define WT_ATOMIC_CAS_VAL4(v, old, new) \
- __WT_ATOMIC_CAS_VAL(v, old, new, 4, , long)
-#define WT_ATOMIC_STORE4(v, val) __WT_ATOMIC_STORE(v, val, 4, , long)
-#define WT_ATOMIC_SUB4(v, val) __WT_ATOMIC_SUB(v, val, 4, , long)
-
-#define WT_ATOMIC_ADD8(v, val) __WT_ATOMIC_ADD(v, val, 8, 64, __int64)
-#define WT_ATOMIC_FETCH_ADD8(v, val) \
- __WT_ATOMIC_FETCH_ADD(v, val, 8, 64, __int64)
-#define WT_ATOMIC_CAS8(v, old, new) \
- __WT_ATOMIC_CAS(v, old, new, 8, 64, __int64)
-#define WT_ATOMIC_CAS_VAL8(v, old, new) \
- __WT_ATOMIC_CAS_VAL(v, old, new, 8, 64, __int64)
-#define WT_ATOMIC_STORE8(v, val) \
- __WT_ATOMIC_STORE(v, val, 8, 64, __int64)
-#define WT_ATOMIC_SUB8(v, val) __WT_ATOMIC_SUB(v, val, 8, 64, __int64)
+/*
+ * __wt_atomic_cas_ptr --
+ * Pointer compare and swap.
+ */
+static inline int
+__wt_atomic_cas_ptr(void *vp, void *old, void *new)
+{
+ return (_InterlockedCompareExchange64(
+ vp, (int64_t)new, (int64_t)old) == ((int64_t)old));
+}
static inline void WT_BARRIER(void) { _ReadWriteBarrier(); }
static inline void WT_FULL_BARRIER(void) { _mm_mfence(); }
diff --git a/src/include/serial.i b/src/include/serial.i
index 0fc23348800..7b62e66eccb 100644
--- a/src/include/serial.i
+++ b/src/include/serial.i
@@ -56,7 +56,7 @@ __insert_simple_func(WT_SESSION_IMPL *session,
for (i = 0; i < skipdepth; i++) {
WT_INSERT *old_ins = *ins_stack[i];
if (old_ins != new_ins->next[i] ||
- !WT_ATOMIC_CAS8(*ins_stack[i], old_ins, new_ins))
+ !__wt_atomic_cas_ptr(ins_stack[i], old_ins, new_ins))
return (i == 0 ? WT_RESTART : 0);
}
@@ -93,7 +93,7 @@ __insert_serial_func(WT_SESSION_IMPL *session, WT_INSERT_HEAD *ins_head,
for (i = 0; i < skipdepth; i++) {
WT_INSERT *old_ins = *ins_stack[i];
if (old_ins != new_ins->next[i] ||
- !WT_ATOMIC_CAS8(*ins_stack[i], old_ins, new_ins))
+ !__wt_atomic_cas_ptr(ins_stack[i], old_ins, new_ins))
return (i == 0 ? WT_RESTART : 0);
if (ins_head->tail[i] == NULL ||
ins_stack[i] == &ins_head->tail[i]->next[i])
@@ -271,7 +271,7 @@ __wt_update_serial(WT_SESSION_IMPL *session, WT_PAGE *page,
* Swap the update into place. If that fails, a new update was added
* after our search, we raced. Check if our update is still permitted.
*/
- while (!WT_ATOMIC_CAS8(*srch_upd, upd->next, upd)) {
+ while (!__wt_atomic_cas_ptr(srch_upd, upd->next, upd)) {
if ((ret = __wt_txn_update_check(
session, upd->next = *srch_upd)) != 0) {
/* Free unused memory on error. */
diff --git a/src/include/stat.h b/src/include/stat.h
index aab251f0e31..df8f950f330 100644
--- a/src/include/stat.h
+++ b/src/include/stat.h
@@ -17,11 +17,11 @@ struct __wt_stats {
#define WT_STAT(stats, fld) \
((stats)->fld.v)
#define WT_STAT_ATOMIC_DECRV(stats, fld, value) do { \
- (void)WT_ATOMIC_SUB8(WT_STAT(stats, fld), (value)); \
+ (void)__wt_atomic_sub64(&WT_STAT(stats, fld), (value)); \
} while (0)
#define WT_STAT_ATOMIC_DECR(stats, fld) WT_STAT_ATOMIC_DECRV(stats, fld, 1)
#define WT_STAT_ATOMIC_INCRV(stats, fld, value) do { \
- (void)WT_ATOMIC_ADD8(WT_STAT(stats, fld), (value)); \
+ (void)__wt_atomic_add64(&WT_STAT(stats, fld), (value)); \
} while (0)
#define WT_STAT_ATOMIC_INCR(stats, fld) WT_ATOMIC_ADD8(WT_STAT(stats, fld), 1)
#define WT_STAT_DECRV(stats, fld, value) do { \
diff --git a/src/include/txn.i b/src/include/txn.i
index 95a8f99cf1b..a262672630f 100644
--- a/src/include/txn.i
+++ b/src/include/txn.i
@@ -284,7 +284,7 @@ __wt_txn_new_id(WT_SESSION_IMPL *session)
* global current ID, so we want post-increment semantics. Our atomic
* add primitive does pre-increment, so adjust the result here.
*/
- return (WT_ATOMIC_ADD8(S2C(session)->txn_global.current, 1) - 1);
+ return (__wt_atomic_addv64(&S2C(session)->txn_global.current, 1) - 1);
}
/*
@@ -360,8 +360,9 @@ __wt_txn_id_check(WT_SESSION_IMPL *session)
*/
do {
txn_state->id = txn->id = txn_global->current;
- } while (!WT_ATOMIC_CAS8(
- txn_global->current, txn->id, txn->id + 1));
+ } while (!__wt_atomic_casv64(
+ &txn_global->current, txn->id, txn->id + 1) ||
+ TXNID_LT(txn->id, txn_global->last_running));
/*
* If we have used 64-bits of transaction IDs, there is nothing
diff --git a/src/log/log_slot.c b/src/log/log_slot.c
index a08a9aff001..b6b5f6bc896 100644
--- a/src/log/log_slot.c
+++ b/src/log/log_slot.c
@@ -104,7 +104,7 @@ __wt_log_slot_join(WT_SESSION_IMPL *session, uint64_t mysize,
WT_CONNECTION_IMPL *conn;
WT_LOG *log;
WT_LOGSLOT *slot;
- int64_t cur_state, new_state, old_state;
+ int64_t new_state, old_state;
uint32_t allocated_slot, slot_attempts;
conn = S2C(session);
@@ -118,8 +118,8 @@ __wt_log_slot_join(WT_SESSION_IMPL *session, uint64_t mysize,
find_slot:
allocated_slot = __wt_random(&session->rnd) % WT_SLOT_ACTIVE;
slot = log->slot_array[allocated_slot];
- old_state = slot->slot_state;
join_slot:
+ old_state = slot->slot_state;
/*
* WT_LOG_SLOT_READY and higher means the slot is available for
* joining. Any other state means it is in use and transitioning
@@ -150,13 +150,11 @@ join_slot:
}
goto find_slot;
}
- cur_state = WT_ATOMIC_CAS_VAL8(slot->slot_state, old_state, new_state);
/*
* We lost a race to add our size into this slot. Check the state
* and try again.
*/
- if (cur_state != old_state) {
- old_state = cur_state;
+ if (!__wt_atomic_casiv64(&slot->slot_state, old_state, new_state)) {
WT_STAT_FAST_CONN_INCR(session, log_slot_races);
goto join_slot;
}
@@ -232,7 +230,8 @@ retry:
newslot->slot_state = WT_LOG_SLOT_READY;
newslot->slot_index = slot->slot_index;
log->slot_array[newslot->slot_index] = &log->slot_pool[pool_i];
- old_state = WT_ATOMIC_STORE8(slot->slot_state, WT_LOG_SLOT_PENDING);
+ old_state =
+ __wt_atomic_storeiv64(&slot->slot_state, WT_LOG_SLOT_PENDING);
slot->slot_group_size = (uint64_t)(old_state - WT_LOG_SLOT_READY);
/*
* Note that this statistic may be much bigger than in reality,
@@ -288,14 +287,11 @@ __wt_log_slot_wait(WT_SESSION_IMPL *session, WT_LOGSLOT *slot)
int64_t
__wt_log_slot_release(WT_LOGSLOT *slot, uint64_t size)
{
- int64_t newsize;
-
/*
* Add my size into the state. When it reaches WT_LOG_SLOT_DONE
* all participatory threads have completed copying their piece.
*/
- newsize = WT_ATOMIC_ADD8(slot->slot_state, (int64_t)size);
- return (newsize);
+ return (__wt_atomic_addiv64(&slot->slot_state, (int64_t)size));
}
/*
diff --git a/src/lsm/lsm_manager.c b/src/lsm/lsm_manager.c
index 1ea41f24ee2..0e22af3b623 100644
--- a/src/lsm/lsm_manager.c
+++ b/src/lsm/lsm_manager.c
@@ -259,7 +259,7 @@ __wt_lsm_manager_free_work_unit(
if (entry != NULL) {
WT_ASSERT(session, entry->lsm_tree->queue_ref > 0);
- (void)WT_ATOMIC_SUB4(entry->lsm_tree->queue_ref, 1);
+ (void)__wt_atomic_sub32(&entry->lsm_tree->queue_ref, 1);
__wt_free(session, entry);
}
}
@@ -673,7 +673,7 @@ __wt_lsm_manager_push_entry(WT_SESSION_IMPL *session,
entry->type = type;
entry->flags = flags;
entry->lsm_tree = lsm_tree;
- (void)WT_ATOMIC_ADD4(lsm_tree->queue_ref, 1);
+ (void)__wt_atomic_add32(&lsm_tree->queue_ref, 1);
WT_STAT_FAST_CONN_INCR(session, lsm_work_units_created);
if (type == WT_LSM_WORK_SWITCH)
diff --git a/src/lsm/lsm_merge.c b/src/lsm/lsm_merge.c
index 6ca1b0f04ab..de7ea37e498 100644
--- a/src/lsm/lsm_merge.c
+++ b/src/lsm/lsm_merge.c
@@ -284,7 +284,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
locked = 0;
/* Allocate an ID for the merge. */
- dest_id = WT_ATOMIC_ADD4(lsm_tree->last, 1);
+ dest_id = __wt_atomic_add32(&lsm_tree->last, 1);
/*
* We only want to do the chunk loop if we're running with verbose,
@@ -375,7 +375,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
* merge_syncing field so that compact knows it is still in
* progress.
*/
- (void)WT_ATOMIC_ADD4(lsm_tree->merge_syncing, 1);
+ (void)__wt_atomic_add32(&lsm_tree->merge_syncing, 1);
in_sync = 1;
/*
* We've successfully created the new chunk. Now install it. We need
@@ -426,7 +426,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
WT_TRET(dest->close(dest));
dest = NULL;
++lsm_tree->merge_progressing;
- (void)WT_ATOMIC_SUB4(lsm_tree->merge_syncing, 1);
+ (void)__wt_atomic_sub32(&lsm_tree->merge_syncing, 1);
in_sync = 0;
WT_ERR_NOTFOUND_OK(ret);
@@ -482,7 +482,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
err: if (locked)
WT_TRET(__wt_lsm_tree_writeunlock(session, lsm_tree));
if (in_sync)
- (void)WT_ATOMIC_SUB4(lsm_tree->merge_syncing, 1);
+ (void)__wt_atomic_sub32(&lsm_tree->merge_syncing, 1);
if (src != NULL)
WT_TRET(src->close(src));
if (dest != NULL)
diff --git a/src/lsm/lsm_tree.c b/src/lsm/lsm_tree.c
index 63f19858279..3c8f4d5750a 100644
--- a/src/lsm/lsm_tree.c
+++ b/src/lsm/lsm_tree.c
@@ -134,7 +134,7 @@ __wt_lsm_tree_close_all(WT_SESSION_IMPL *session)
* is no need to decrement the reference count since discard
* is unconditional.
*/
- (void)WT_ATOMIC_ADD4(lsm_tree->refcnt, 1);
+ (void)__wt_atomic_add32(&lsm_tree->refcnt, 1);
WT_TRET(__lsm_tree_close(session, lsm_tree));
WT_TRET(__lsm_tree_discard(session, lsm_tree, 1));
}
@@ -474,15 +474,17 @@ __lsm_tree_find(WT_SESSION_IMPL *session,
* Make sure we win the race to switch on the
* exclusive flag.
*/
- if (!WT_ATOMIC_CAS1(lsm_tree->exclusive, 0, 1))
+ if (!__wt_atomic_cas8(
+ &lsm_tree->exclusive, 0, 1))
return (EBUSY);
/* Make sure there are no readers */
- if (!WT_ATOMIC_CAS4(lsm_tree->refcnt, 0, 1)) {
+ if (!__wt_atomic_cas32(
+ &lsm_tree->refcnt, 0, 1)) {
lsm_tree->exclusive = 0;
return (EBUSY);
}
} else {
- (void)WT_ATOMIC_ADD4(lsm_tree->refcnt, 1);
+ (void)__wt_atomic_add32(&lsm_tree->refcnt, 1);
/*
* We got a reference, check if an exclusive
@@ -491,8 +493,8 @@ __lsm_tree_find(WT_SESSION_IMPL *session,
if (lsm_tree->exclusive) {
WT_ASSERT(session,
lsm_tree->refcnt > 0);
- (void)WT_ATOMIC_SUB4(
- lsm_tree->refcnt, 1);
+ (void)__wt_atomic_sub32(
+ &lsm_tree->refcnt, 1);
return (EBUSY);
}
}
@@ -553,7 +555,7 @@ __lsm_tree_open(WT_SESSION_IMPL *session,
WT_ASSERT(session, F_ISSET(session, WT_SESSION_HANDLE_LIST_LOCKED));
/* Start the LSM manager thread if it isn't running. */
- if (WT_ATOMIC_CAS4(conn->lsm_manager.lsm_workers, 0, 1))
+ if (__wt_atomic_cas32(&conn->lsm_manager.lsm_workers, 0, 1))
WT_RET(__wt_lsm_manager_start(session));
/* Make sure no one beat us to it. */
@@ -632,7 +634,7 @@ __wt_lsm_tree_release(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
WT_ASSERT(session, lsm_tree->refcnt > 0);
if (lsm_tree->exclusive)
lsm_tree->exclusive = 0;
- (void)WT_ATOMIC_SUB4(lsm_tree->refcnt, 1);
+ (void)__wt_atomic_sub32(&lsm_tree->refcnt, 1);
}
/* How aggressively to ramp up or down throttle due to level 0 merging */
@@ -827,7 +829,7 @@ __wt_lsm_tree_switch(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
/* Update the throttle time. */
__wt_lsm_tree_throttle(session, lsm_tree, 0);
- new_id = WT_ATOMIC_ADD4(lsm_tree->last, 1);
+ new_id = __wt_atomic_add32(&lsm_tree->last, 1);
WT_ERR(__wt_realloc_def(session, &lsm_tree->chunk_alloc,
nchunks + 1, &lsm_tree->chunk));
@@ -1085,7 +1087,7 @@ __wt_lsm_tree_truncate(
/* Create the new chunk. */
WT_ERR(__wt_calloc_one(session, &chunk));
- chunk->id = WT_ATOMIC_ADD4(lsm_tree->last, 1);
+ chunk->id = __wt_atomic_add32(&lsm_tree->last, 1);
WT_ERR(__wt_lsm_tree_setup_chunk(session, lsm_tree, chunk));
/* Mark all chunks old. */
@@ -1195,7 +1197,8 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip)
WT_LSM_TREE *lsm_tree;
time_t begin, end;
uint64_t progress;
- int i, compacting, flushing, locked, ref;
+ uint32_t i;
+ int compacting, flushing, locked, ref;
compacting = flushing = locked = ref = 0;
chunk = NULL;
@@ -1258,7 +1261,7 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip)
* If we have a chunk, we want to look for it to be on-disk.
* So we need to add a reference to keep it available.
*/
- (void)WT_ATOMIC_ADD4(chunk->refcnt, 1);
+ (void)__wt_atomic_add32(&chunk->refcnt, 1);
ref = 1;
}
@@ -1306,7 +1309,7 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip)
"Start compacting progress %" PRIu64,
name, chunk->id,
lsm_tree->merge_progressing));
- (void)WT_ATOMIC_SUB4(chunk->refcnt, 1);
+ (void)__wt_atomic_sub32(&chunk->refcnt, 1);
flushing = ref = 0;
compacting = 1;
F_SET(lsm_tree, WT_LSM_TREE_COMPACTING);
@@ -1360,7 +1363,7 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip)
err:
/* Ensure anything we set is cleared. */
if (ref)
- (void)WT_ATOMIC_SUB4(chunk->refcnt, 1);
+ (void)__wt_atomic_sub32(&chunk->refcnt, 1);
if (compacting) {
F_CLR(lsm_tree, WT_LSM_TREE_COMPACTING);
lsm_tree->merge_aggressiveness = 0;
diff --git a/src/lsm/lsm_work_unit.c b/src/lsm/lsm_work_unit.c
index 4f5e1516f1c..3e0fd43d404 100644
--- a/src/lsm/lsm_work_unit.c
+++ b/src/lsm/lsm_work_unit.c
@@ -53,7 +53,7 @@ __lsm_copy_chunks(WT_SESSION_IMPL *session,
* it's safe.
*/
for (i = 0; i < nchunks; i++)
- (void)WT_ATOMIC_ADD4(cookie->chunk_array[i]->refcnt, 1);
+ (void)__wt_atomic_add32(&cookie->chunk_array[i]->refcnt, 1);
err: WT_TRET(__wt_lsm_tree_readunlock(session, lsm_tree));
@@ -122,7 +122,7 @@ __wt_lsm_get_chunk_to_flush(WT_SESSION_IMPL *session,
force ? " w/ force" : "",
i, lsm_tree->nchunks, chunk->uri));
- (void)WT_ATOMIC_ADD4(chunk->refcnt, 1);
+ (void)__wt_atomic_add32(&chunk->refcnt, 1);
}
err: WT_RET(__wt_lsm_tree_readunlock(session, lsm_tree));
@@ -145,7 +145,7 @@ __lsm_unpin_chunks(WT_SESSION_IMPL *session, WT_LSM_WORKER_COOKIE *cookie)
if (cookie->chunk_array[i] == NULL)
continue;
WT_ASSERT(session, cookie->chunk_array[i]->refcnt > 0);
- (void)WT_ATOMIC_SUB4(cookie->chunk_array[i]->refcnt, 1);
+ (void)__wt_atomic_sub32(&cookie->chunk_array[i]->refcnt, 1);
}
/* Ensure subsequent calls don't double decrement. */
cookie->nchunks = 0;
@@ -219,7 +219,7 @@ __wt_lsm_work_bloom(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
* See if we win the race to switch on the "busy" flag and
* recheck that the chunk still needs a Bloom filter.
*/
- if (WT_ATOMIC_CAS4(chunk->bloom_busy, 0, 1)) {
+ if (__wt_atomic_cas32(&chunk->bloom_busy, 0, 1)) {
if (!F_ISSET(chunk, WT_LSM_CHUNK_BLOOM)) {
ret = __lsm_bloom_create(
session, lsm_tree, chunk, (u_int)i);
@@ -541,7 +541,7 @@ __wt_lsm_free_chunks(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
* Make sure only a single thread is freeing the old chunk array
* at any time.
*/
- if (!WT_ATOMIC_CAS4(lsm_tree->freeing_old_chunks, 0, 1))
+ if (!__wt_atomic_cas32(&lsm_tree->freeing_old_chunks, 0, 1))
return (0);
/*
* Take a copy of the current state of the LSM tree and look for chunks
diff --git a/src/lsm/lsm_worker.c b/src/lsm/lsm_worker.c
index d1272df763d..252523c5c57 100644
--- a/src/lsm/lsm_worker.c
+++ b/src/lsm/lsm_worker.c
@@ -65,7 +65,7 @@ __lsm_worker_general_op(
ret = __wt_lsm_checkpoint_chunk(
session, entry->lsm_tree, chunk);
WT_ASSERT(session, chunk->refcnt > 0);
- (void)WT_ATOMIC_SUB4(chunk->refcnt, 1);
+ (void)__wt_atomic_sub32(&chunk->refcnt, 1);
WT_ERR(ret);
}
} else if (entry->type == WT_LSM_WORK_DROP)
diff --git a/src/os_posix/os_mtx_cond.c b/src/os_posix/os_mtx_cond.c
index dfd72dd0cd2..baf9b475777 100644
--- a/src/os_posix/os_mtx_cond.c
+++ b/src/os_posix/os_mtx_cond.c
@@ -54,7 +54,7 @@ __wt_cond_wait(WT_SESSION_IMPL *session, WT_CONDVAR *cond, uint64_t usecs)
locked = 0;
/* Fast path if already signalled. */
- if (WT_ATOMIC_ADD4(cond->waiters, 1) == 0)
+ if (__wt_atomic_addi32(&cond->waiters, 1) == 0)
return (0);
/*
@@ -91,7 +91,7 @@ __wt_cond_wait(WT_SESSION_IMPL *session, WT_CONDVAR *cond, uint64_t usecs)
ret == ETIMEDOUT)
ret = 0;
- (void)WT_ATOMIC_SUB4(cond->waiters, 1);
+ (void)__wt_atomic_subi32(&cond->waiters, 1);
err: if (locked)
WT_TRET(pthread_mutex_unlock(&cond->mtx));
@@ -124,7 +124,7 @@ __wt_cond_signal(WT_SESSION_IMPL *session, WT_CONDVAR *cond)
if (cond->waiters == -1)
return (0);
- if (cond->waiters > 0 || !WT_ATOMIC_CAS4(cond->waiters, 0, -1)) {
+ if (cond->waiters > 0 || !__wt_atomic_casi32(&cond->waiters, 0, -1)) {
WT_ERR(pthread_mutex_lock(&cond->mtx));
locked = 1;
WT_ERR(pthread_cond_broadcast(&cond->cond));
diff --git a/src/os_posix/os_mtx_rw.c b/src/os_posix/os_mtx_rw.c
index c3ae43b605f..3927d618ede 100644
--- a/src/os_posix/os_mtx_rw.c
+++ b/src/os_posix/os_mtx_rw.c
@@ -82,7 +82,7 @@ __wt_try_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
writers = l->s.writers;
old = (pad << 48) + (users << 32) + (users << 16) + writers;
new = (pad << 48) + ((users + 1) << 32) + ((users + 1) << 16) + writers;
- return (WT_ATOMIC_CAS_VAL8(l->u, old, new) == old ? 0 : EBUSY);
+ return (__wt_atomic_cas64(&l->u, old, new) ? 0 : EBUSY);
}
/*
@@ -102,7 +102,7 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
WT_STAT_FAST_CONN_INCR(session, rwlock_read);
l = &rwlock->rwlock;
- me = WT_ATOMIC_FETCH_ADD8(l->u, (uint64_t)1 << 32);
+ me = __wt_atomic_fetch_add64(&l->u, (uint64_t)1 << 32);
val = (uint16_t)(me >> 32);
for (pause_cnt = 0; val != l->s.readers;) {
/*
@@ -138,7 +138,7 @@ __wt_readunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
session, WT_VERB_MUTEX, "rwlock: read unlock %s", rwlock->name));
l = &rwlock->rwlock;
- WT_ATOMIC_ADD2(l->s.writers, 1);
+ (void)__wt_atomic_add16(&l->s.writers, 1);
return (0);
}
@@ -163,7 +163,7 @@ __wt_try_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
users = l->s.users;
old = (pad << 48) + (users << 32) + (readers << 16) + users;
new = (pad << 48) + ((users + 1) << 32) + (readers << 16) + users;
- return (WT_ATOMIC_CAS_VAL8(l->u, old, new) == old ? 0 : EBUSY);
+ return (__wt_atomic_cas64(&l->u, old, new) ? 0 : EBUSY);
}
/*
@@ -187,7 +187,7 @@ __wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* the write lock.
*/
l = &rwlock->rwlock;
- me = WT_ATOMIC_FETCH_ADD8(l->u, (uint64_t)1 << 32);
+ me = __wt_atomic_fetch_add64(&l->u, (uint64_t)1 << 32);
val = (uint16_t)(me >> 32);
while (val != l->s.writers)
WT_PAUSE();
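The read/write lock above is a ticket lock packed into a single 64-bit word; the shift arithmetic implies four 16-bit fields, writers in bits 0-15, readers in 16-31, users in 32-47 and pad in 48-63. The old WT_ATOMIC_CAS_VAL8 returned the value it observed and the caller compared it with old; __wt_atomic_cas64 returns a success flag, which is why the "== old" comparisons disappear. A self-contained sketch of the try-readlock step under those assumptions (field layout inferred from the arithmetic, names invented for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative sketch only, not WiredTiger code: a ticket-rwlock word
 * packing writers (bits 0-15), readers (16-31), users (32-47) and
 * pad (48-63), matching the shift arithmetic in the hunks above.
 */
static _Atomic uint64_t lock_word;

/* Try to take a read lock: succeeds only if nobody is queued ahead. */
static bool
try_readlock(void)
{
        uint64_t new, old, pad, readers, users, writers;

        old = atomic_load(&lock_word);
        writers = old & 0xffff;
        readers = (old >> 16) & 0xffff;
        users = (old >> 32) & 0xffff;
        pad = old >> 48;

        /* Uncontended only if the next reader ticket equals the next user ticket. */
        if (readers != users)
                return (false);

        /* Hand out a new ticket and let it run immediately. */
        new = (pad << 48) + ((users + 1) << 32) + ((users + 1) << 16) + writers;
        return (atomic_compare_exchange_strong(&lock_word, &old, new));
}

The blocking paths in the diff work the same way, but take their ticket with __wt_atomic_fetch_add64 on the whole word and spin until the readers (or writers) field catches up to it.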
diff --git a/src/os_posix/os_open.c b/src/os_posix/os_open.c
index b3dc8f1db27..1453f1a666d 100644
--- a/src/os_posix/os_open.c
+++ b/src/os_posix/os_open.c
@@ -177,7 +177,7 @@ setupfh:
}
if (!matched) {
WT_CONN_FILE_INSERT(conn, fh, bucket);
- (void)WT_ATOMIC_ADD4(conn->open_file_count, 1);
+ (void)__wt_atomic_add32(&conn->open_file_count, 1);
*fhp = fh;
}
@@ -223,7 +223,7 @@ __wt_close(WT_SESSION_IMPL *session, WT_FH **fhp)
/* Remove from the list. */
bucket = fh->name_hash % WT_HASH_ARRAY_SIZE;
WT_CONN_FILE_REMOVE(conn, fh, bucket);
- (void)WT_ATOMIC_SUB4(conn->open_file_count, 1);
+ (void)__wt_atomic_sub32(&conn->open_file_count, 1);
__wt_spin_unlock(session, &conn->fh_lock);
diff --git a/src/os_win/os_mtx_cond.c b/src/os_win/os_mtx_cond.c
index 51f6d6533c8..565928cb863 100644
--- a/src/os_win/os_mtx_cond.c
+++ b/src/os_win/os_mtx_cond.c
@@ -51,7 +51,7 @@ __wt_cond_wait(WT_SESSION_IMPL *session, WT_CONDVAR *cond, uint64_t usecs)
locked = 0;
/* Fast path if already signalled. */
- if (WT_ATOMIC_ADD4(cond->waiters, 1) == 0)
+ if (__wt_atomic_addi32(&cond->waiters, 1) == 0)
return (0);
/*
@@ -97,7 +97,7 @@ __wt_cond_wait(WT_SESSION_IMPL *session, WT_CONDVAR *cond, uint64_t usecs)
}
}
- (void)WT_ATOMIC_SUB4(cond->waiters, 1);
+ (void)__wt_atomic_subi32(&cond->waiters, 1);
if (locked)
LeaveCriticalSection(&cond->mtx);
@@ -130,7 +130,7 @@ __wt_cond_signal(WT_SESSION_IMPL *session, WT_CONDVAR *cond)
if (cond->waiters == -1)
return (0);
- if (cond->waiters > 0 || !WT_ATOMIC_CAS4(cond->waiters, 0, -1)) {
+ if (cond->waiters > 0 || !__wt_atomic_casi32(&cond->waiters, 0, -1)) {
EnterCriticalSection(&cond->mtx);
locked = 1;
WakeAllConditionVariable(&cond->cond);
diff --git a/src/os_win/os_open.c b/src/os_win/os_open.c
index 1c6f5636501..3bd24369242 100644
--- a/src/os_win/os_open.c
+++ b/src/os_win/os_open.c
@@ -169,7 +169,7 @@ setupfh:
}
if (!matched) {
WT_CONN_FILE_INSERT(conn, fh, bucket);
- (void)WT_ATOMIC_ADD4(conn->open_file_count, 1);
+ (void)__wt_atomic_add32(&conn->open_file_count, 1);
*fhp = fh;
}
@@ -217,7 +217,7 @@ __wt_close(WT_SESSION_IMPL *session, WT_FH **fhp)
/* Remove from the list. */
bucket = fh->name_hash % WT_HASH_ARRAY_SIZE;
WT_CONN_FILE_REMOVE(conn, fh, bucket);
- (void)WT_ATOMIC_SUB4(conn->open_file_count, 1);
+ (void)__wt_atomic_sub32(&conn->open_file_count, 1);
__wt_spin_unlock(session, &conn->fh_lock);
diff --git a/src/reconcile/rec_write.c b/src/reconcile/rec_write.c
index e11490ac7fc..eaaa3a56be3 100644
--- a/src/reconcile/rec_write.c
+++ b/src/reconcile/rec_write.c
@@ -277,7 +277,7 @@ typedef struct {
WT_SALVAGE_COOKIE *salvage; /* If it's a salvage operation */
- int tested_ref_state; /* Debugging information */
+ uint32_t tested_ref_state; /* Debugging information */
} WT_RECONCILE;
static void __rec_bnd_cleanup(WT_SESSION_IMPL *, WT_RECONCILE *, int);
@@ -1100,8 +1100,8 @@ __rec_child_modify(WT_SESSION_IMPL *session,
* to see if the delete is visible to us. Lock down the
* structure.
*/
- if (!WT_ATOMIC_CAS4(
- ref->state, WT_REF_DELETED, WT_REF_LOCKED))
+ if (!__wt_atomic_casv32(
+ &ref->state, WT_REF_DELETED, WT_REF_LOCKED))
break;
ret = __rec_child_deleted(session, r, ref, statep);
WT_PUBLISH(ref->state, WT_REF_DELETED);
@@ -5069,7 +5069,7 @@ err: __wt_scr_free(session, &tkey);
TXNID_LT(btree->rec_max_txn, r->max_txn))
btree->rec_max_txn = r->max_txn;
- if (WT_ATOMIC_CAS4(mod->write_gen, r->orig_write_gen, 0))
+ if (__wt_atomic_cas32(&mod->write_gen, r->orig_write_gen, 0))
__wt_cache_dirty_decr(session, page);
}
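Two things happen in this file: tested_ref_state becomes a uint32_t, presumably so the debugging field matches the type the strictly typed atomic helpers now require for ref->state, and the child-modify path "locks" a deleted ref by swapping its state from WT_REF_DELETED to WT_REF_LOCKED, publishing WT_REF_DELETED back when it is done. A sketch of that CAS-as-lock idiom with C11 atomics (the enum values are hypothetical stand-ins, and a release store is used here only as an analogue of WT_PUBLISH):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative sketch only, not WiredTiger code: locking one slot of a
 * small state machine with a 32-bit compare-and-swap.
 */
enum { REF_DELETED = 1, REF_LOCKED = 2 };       /* hypothetical values */

static _Atomic uint32_t ref_state = REF_DELETED;

static bool
lock_deleted_ref(void)
{
        uint32_t expected = REF_DELETED;

        /* Exactly one thread moves DELETED -> LOCKED; losers re-read the state. */
        return (atomic_compare_exchange_strong(&ref_state, &expected, REF_LOCKED));
}

static void
unlock_ref(void)
{
        /* A release store stands in for WT_PUBLISH() in this sketch. */
        atomic_store_explicit(&ref_state, REF_DELETED, memory_order_release);
}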
diff --git a/src/session/session_dhandle.c b/src/session/session_dhandle.c
index 7488d9115f0..61bc37da11d 100644
--- a/src/session/session_dhandle.c
+++ b/src/session/session_dhandle.c
@@ -31,7 +31,7 @@ __session_add_dhandle(
if (dhandle_cachep != NULL)
*dhandle_cachep = dhandle_cache;
- (void)WT_ATOMIC_ADD4(session->dhandle->session_ref, 1);
+ (void)__wt_atomic_add32(&session->dhandle->session_ref, 1);
/* Sweep the handle list to remove any dead handles. */
return (__session_dhandle_sweep(session));
@@ -51,7 +51,7 @@ __session_discard_dhandle(
TAILQ_REMOVE(&session->dhandles, dhandle_cache, q);
TAILQ_REMOVE(&session->dhhash[bucket], dhandle_cache, hashq);
- (void)WT_ATOMIC_SUB4(dhandle_cache->dhandle->session_ref, 1);
+ (void)__wt_atomic_sub32(&dhandle_cache->dhandle->session_ref, 1);
__wt_overwrite_and_free(session, dhandle_cache);
}
@@ -362,7 +362,7 @@ __session_find_shared_dhandle(WT_SESSION_IMPL *session,
const char *uri, const char *checkpoint, uint32_t flags)
{
WT_RET(__wt_conn_dhandle_find(session, uri, checkpoint, flags));
- (void)WT_ATOMIC_ADD4(session->dhandle->session_ref, 1);
+ (void)__wt_atomic_add32(&session->dhandle->session_ref, 1);
return (0);
}
diff --git a/src/txn/txn.c b/src/txn/txn.c
index a391ec8be88..45be0a15a32 100644
--- a/src/txn/txn.c
+++ b/src/txn/txn.c
@@ -96,7 +96,7 @@ __wt_txn_get_snapshot(WT_SESSION_IMPL *session)
if ((count = txn_global->scan_count) < 0)
WT_PAUSE();
} while (count < 0 ||
- !WT_ATOMIC_CAS4(txn_global->scan_count, count, count + 1));
+ !__wt_atomic_casiv32(&txn_global->scan_count, count, count + 1));
current_id = snap_min = txn_global->current;
prev_oldest_id = txn_global->oldest_id;
@@ -109,7 +109,7 @@ __wt_txn_get_snapshot(WT_SESSION_IMPL *session)
/* Check that the oldest ID has not moved in the meantime. */
if (prev_oldest_id == txn_global->oldest_id) {
WT_ASSERT(session, txn_global->scan_count > 0);
- (void)WT_ATOMIC_SUB4(txn_global->scan_count, 1);
+ (void)__wt_atomic_subiv32(&txn_global->scan_count, 1);
return;
}
}
@@ -149,7 +149,7 @@ __wt_txn_get_snapshot(WT_SESSION_IMPL *session)
txn_global->last_running = snap_min;
WT_ASSERT(session, txn_global->scan_count > 0);
- (void)WT_ATOMIC_SUB4(txn_global->scan_count, 1);
+ (void)__wt_atomic_subiv32(&txn_global->scan_count, 1);
__txn_sort_snapshot(session, n, current_id);
}
@@ -203,7 +203,7 @@ __wt_txn_update_oldest(WT_SESSION_IMPL *session, int force)
if ((count = txn_global->scan_count) < 0)
WT_PAUSE();
} while (count < 0 ||
- !WT_ATOMIC_CAS4(txn_global->scan_count, count, count + 1));
+ !__wt_atomic_casiv32(&txn_global->scan_count, count, count + 1));
/* The oldest ID cannot change until the scan count goes to zero. */
prev_oldest_id = txn_global->oldest_id;
@@ -251,7 +251,7 @@ __wt_txn_update_oldest(WT_SESSION_IMPL *session, int force)
/* Update the oldest ID. */
if (TXNID_LT(prev_oldest_id, oldest_id) &&
- WT_ATOMIC_CAS4(txn_global->scan_count, 1, -1)) {
+ __wt_atomic_casiv32(&txn_global->scan_count, 1, -1)) {
WT_ORDERED_READ(session_cnt, conn->session_cnt);
for (i = 0, s = txn_global->states; i < session_cnt; i++, s++) {
if ((id = s->id) != WT_TXN_NONE &&
@@ -277,7 +277,7 @@ __wt_txn_update_oldest(WT_SESSION_IMPL *session, int force)
oldest_session->txn.snap_min);
}
WT_ASSERT(session, txn_global->scan_count > 0);
- (void)WT_ATOMIC_SUB4(txn_global->scan_count, 1);
+ (void)__wt_atomic_subiv32(&txn_global->scan_count, 1);
}
}
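scan_count acts as a small reader/writer gate around the transaction-state scan: a positive value counts concurrent scanners, and -1 marks the gate as held exclusively while the oldest ID is updated. Scanners spin while the count is negative, then CAS it from count to count + 1; the updater converts 1 to -1, which can only succeed when it is the sole scanner, and everyone decrements on the way out. The field is a volatile signed 32-bit value, which appears to be what the casiv32/subiv32 suffixes encode. A rough C11 sketch of the gate alone, omitting the oldest-ID bookkeeping (scan_enter, scan_leave and scan_try_exclusive are invented names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative sketch only, not WiredTiger code: a signed gate where a
 * positive count is the number of scanners and -1 is exclusive access.
 */
static _Atomic int32_t scan_count;

/* Join the scan: spin while the gate is held exclusively. */
static void
scan_enter(void)
{
        int32_t count;

        do {
                while ((count = atomic_load(&scan_count)) < 0)
                        ;       /* spin: an exclusive update is running */
        } while (!atomic_compare_exchange_weak(&scan_count, &count, count + 1));
}

/* Leave the scan. */
static void
scan_leave(void)
{
        (void)atomic_fetch_sub(&scan_count, 1);
}

/* Try to take the gate exclusively: only possible as the last scanner. */
static bool
scan_try_exclusive(void)
{
        int32_t expected = 1;

        return (atomic_compare_exchange_strong(&scan_count, &expected, -1));
}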