| author | Keith Bostic <keith@wiredtiger.com> | 2015-08-13 19:36:56 -0400 |
| --- | --- | --- |
| committer | Keith Bostic <keith@wiredtiger.com> | 2015-08-13 19:36:56 -0400 |
| commit | 368f622ba7e7eaca691ba4a4a433c614709bdbea (patch) | |
| tree | ba6b240debade7e04335658cd95342ffc1a4c5b5 | |
| parent | 7c1abeab21aa31f4b73f1974a6cbdd56cea0bc7c (diff) | |
| download | mongo-368f622ba7e7eaca691ba4a4a433c614709bdbea.tar.gz | |
Rename atomic functions from number of bytes to number of bits.
33 files changed, 150 insertions, 149 deletions
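The change is mechanical: the numeric suffix on every `__wt_atomic_*` wrapper now names the operand width in bits rather than bytes, so `__wt_atomic_add8` (an 8-byte add) becomes `__wt_atomic_add64`, `__wt_atomic_cas4` becomes `__wt_atomic_cas32`, and the old 1- and 2-byte variants become the `8` and `16` families. The sketch below models the renamed per-width generator on the `gcc.h` hunk further down; the GCC `__sync` builtins are an assumption standing in for whatever primitives the real header selects, and only three of the generated operations are shown.

```c
#include <stdint.h>

/*
 * WT_ATOMIC_FUNC --
 *	Generate add/sub/cas wrappers for one operand width.  The first
 * argument is now the width in bits; before this commit it was the width
 * in bytes.  Sketch only: the real headers generate more operations and
 * choose platform-specific primitives; the __sync builtins are assumed.
 */
#define	WT_ATOMIC_FUNC(name, ret, type)					\
static inline ret							\
__wt_atomic_add##name(type *vp, type v)					\
{									\
	return (__sync_add_and_fetch(vp, v));				\
}									\
static inline ret							\
__wt_atomic_sub##name(type *vp, type v)					\
{									\
	return (__sync_sub_and_fetch(vp, v));				\
}									\
static inline int							\
__wt_atomic_cas##name(type *vp, type old, type new)			\
{									\
	return (__sync_bool_compare_and_swap(vp, old, new));		\
}

WT_ATOMIC_FUNC(8, uint8_t, uint8_t)	/* was WT_ATOMIC_FUNC(1, ...) */
WT_ATOMIC_FUNC(16, uint16_t, uint16_t)	/* was WT_ATOMIC_FUNC(2, ...) */
WT_ATOMIC_FUNC(32, uint32_t, uint32_t)	/* was WT_ATOMIC_FUNC(4, ...) */
WT_ATOMIC_FUNC(64, uint64_t, uint64_t)	/* was WT_ATOMIC_FUNC(8, ...) */
```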
diff --git a/bench/wtperf/wtperf.c b/bench/wtperf/wtperf.c index 5b4d849e546..5d3b334785d 100644 --- a/bench/wtperf/wtperf.c +++ b/bench/wtperf/wtperf.c @@ -100,7 +100,7 @@ static uint64_t wtperf_value_range(CONFIG *); static inline uint64_t get_next_incr(CONFIG *cfg) { - return (__wt_atomic_add8(&cfg->insert_key, 1)); + return (__wt_atomic_add64(&cfg->insert_key, 1)); } static void @@ -151,7 +151,7 @@ cb_asyncop(WT_ASYNC_CALLBACK *cb, WT_ASYNC_OP *op, int ret, uint32_t flags) switch (type) { case WT_AOP_COMPACT: tables = (uint32_t *)op->app_private; - (void)__wt_atomic_add4(tables, (uint32_t)-1); + (void)__wt_atomic_add32(tables, (uint32_t)-1); break; case WT_AOP_INSERT: trk = &thread->insert; @@ -186,7 +186,7 @@ cb_asyncop(WT_ASYNC_CALLBACK *cb, WT_ASYNC_OP *op, int ret, uint32_t flags) return (0); if (ret == 0 || (ret == WT_NOTFOUND && type != WT_AOP_INSERT)) { if (!cfg->in_warmup) - (void)__wt_atomic_add8(&trk->ops, 1); + (void)__wt_atomic_add64(&trk->ops, 1); return (0); } err: diff --git a/src/async/async_api.c b/src/async/async_api.c index 7d0ae1bfd5d..cd232af5340 100644 --- a/src/async/async_api.c +++ b/src/async/async_api.c @@ -151,15 +151,16 @@ retry: * If we can set the state then the op entry is ours. * Start the next search at the next entry after this one. */ - if (!__wt_atomic_cas4(&op->state, WT_ASYNCOP_FREE, WT_ASYNCOP_READY)) { + if (!__wt_atomic_cas32(&op->state, WT_ASYNCOP_FREE, WT_ASYNCOP_READY)) { WT_STAT_FAST_CONN_INCR(session, async_alloc_race); goto retry; } WT_STAT_FAST_CONN_INCRV(session, async_alloc_view, view); WT_RET(__async_get_format(conn, uri, config, op)); - op->unique_id = __wt_atomic_add8(&async->op_id, 1); + op->unique_id = __wt_atomic_add64(&async->op_id, 1); op->optype = WT_AOP_NONE; - (void)__wt_atomic_store4(&async->ops_index, (i + 1) % conn->async_size); + (void)__wt_atomic_store32( + &async->ops_index, (i + 1) % conn->async_size); *opp = op; return (0); } @@ -514,7 +515,7 @@ retry: */ __wt_sleep(0, 100000); - if (!__wt_atomic_cas4(&async->flush_state, WT_ASYNC_FLUSH_NONE, + if (!__wt_atomic_cas32(&async->flush_state, WT_ASYNC_FLUSH_NONE, WT_ASYNC_FLUSH_IN_PROGRESS)) goto retry; /* @@ -524,7 +525,7 @@ retry: * things off the work queue with the lock. */ async->flush_count = 0; - (void)__wt_atomic_add8(&async->flush_gen, 1); + (void)__wt_atomic_add64(&async->flush_gen, 1); WT_ASSERT(session, async->flush_op.state == WT_ASYNCOP_FREE); async->flush_op.state = WT_ASYNCOP_READY; WT_ERR(__wt_async_op_enqueue(session, &async->flush_op)); diff --git a/src/async/async_op.c b/src/async/async_op.c index 920a7978f7e..7e1920933c2 100644 --- a/src/async/async_op.c +++ b/src/async/async_op.c @@ -280,7 +280,7 @@ __wt_async_op_enqueue(WT_SESSION_IMPL *session, WT_ASYNC_OP_IMPL *op) * Enqueue op at the tail of the work queue. * We get our slot in the ring buffer to use. */ - my_alloc = __wt_atomic_add8(&async->alloc_head, 1); + my_alloc = __wt_atomic_add64(&async->alloc_head, 1); my_slot = my_alloc % async->async_qsize; /* @@ -300,7 +300,7 @@ __wt_async_op_enqueue(WT_SESSION_IMPL *session, WT_ASYNC_OP_IMPL *op) #endif WT_PUBLISH(async->async_queue[my_slot], op); op->state = WT_ASYNCOP_ENQUEUED; - if (__wt_atomic_add4(&async->cur_queue, 1) > async->max_queue) + if (__wt_atomic_add32(&async->cur_queue, 1) > async->max_queue) WT_PUBLISH(async->max_queue, async->cur_queue); /* * Multiple threads may be adding ops to the queue. 
We need to wait diff --git a/src/async/async_worker.c b/src/async/async_worker.c index 0bab800608b..6a5ec5feeb0 100644 --- a/src/async/async_worker.c +++ b/src/async/async_worker.c @@ -67,7 +67,7 @@ retry: * a race, try again. */ my_consume = last_consume + 1; - if (!__wt_atomic_cas8(&async->alloc_tail, last_consume, my_consume)) + if (!__wt_atomic_cas64(&async->alloc_tail, last_consume, my_consume)) goto retry; /* * This item of work is ours to process. Clear it out of the @@ -81,7 +81,7 @@ retry: WT_ASSERT(session, async->cur_queue > 0); WT_ASSERT(session, *op != NULL); WT_ASSERT(session, (*op)->state == WT_ASYNCOP_ENQUEUED); - (void)__wt_atomic_sub4(&async->cur_queue, 1); + (void)__wt_atomic_sub32(&async->cur_queue, 1); (*op)->state = WT_ASYNCOP_WORKING; if (*op == &async->flush_op) @@ -316,7 +316,7 @@ __wt_async_worker(void *arg) * the queue. */ WT_ORDERED_READ(flush_gen, async->flush_gen); - if (__wt_atomic_add4(&async->flush_count, 1) == + if (__wt_atomic_add32(&async->flush_count, 1) == conn->async_workers) { /* * We're last. All workers accounted for so diff --git a/src/block/block_open.c b/src/block/block_open.c index c4884e8dafc..300e85e301e 100644 --- a/src/block/block_open.c +++ b/src/block/block_open.c @@ -158,9 +158,9 @@ __wt_block_configure_first_fit(WT_BLOCK *block, int on) * as long as any operation wants it. */ if (on) - (void)__wt_atomic_add4(&block->allocfirst, 1); + (void)__wt_atomic_add32(&block->allocfirst, 1); else - (void)__wt_atomic_sub4(&block->allocfirst, 1); + (void)__wt_atomic_sub32(&block->allocfirst, 1); } /* diff --git a/src/btree/bt_delete.c b/src/btree/bt_delete.c index a522001e747..cddfa0ef801 100644 --- a/src/btree/bt_delete.c +++ b/src/btree/bt_delete.c @@ -70,15 +70,15 @@ __wt_delete_page(WT_SESSION_IMPL *session, WT_REF *ref, int *skipp) /* If we have a clean page in memory, attempt to evict it. */ if (ref->state == WT_REF_MEM && - __wt_atomic_casv4(&ref->state, WT_REF_MEM, WT_REF_LOCKED)) { + __wt_atomic_casv32(&ref->state, WT_REF_MEM, WT_REF_LOCKED)) { if (__wt_page_is_modified(ref->page)) { WT_PUBLISH(ref->state, WT_REF_MEM); return (0); } - (void)__wt_atomic_addv4(&S2BT(session)->evict_busy, 1); + (void)__wt_atomic_addv32(&S2BT(session)->evict_busy, 1); ret = __wt_evict_page(session, ref); - (void)__wt_atomic_subv4(&S2BT(session)->evict_busy, 1); + (void)__wt_atomic_subv32(&S2BT(session)->evict_busy, 1); WT_RET_BUSY_OK(ret); } @@ -93,7 +93,7 @@ __wt_delete_page(WT_SESSION_IMPL *session, WT_REF *ref, int *skipp) * unclear optimizing for overlapping range deletes is worth the effort. */ if (ref->state != WT_REF_DISK || - !__wt_atomic_casv4(&ref->state, WT_REF_DISK, WT_REF_LOCKED)) + !__wt_atomic_casv32(&ref->state, WT_REF_DISK, WT_REF_LOCKED)) return (0); /* @@ -176,7 +176,7 @@ __wt_delete_page_rollback(WT_SESSION_IMPL *session, WT_REF *ref) * If the page is still "deleted", it's as we left it, * reset the state. 
*/ - if (__wt_atomic_casv4( + if (__wt_atomic_casv32( &ref->state, WT_REF_DELETED, WT_REF_DISK)) return; break; @@ -242,7 +242,7 @@ __wt_delete_page_skip(WT_SESSION_IMPL *session, WT_REF *ref) if (ref->page_del == NULL) return (1); - if (!__wt_atomic_casv4(&ref->state, WT_REF_DELETED, WT_REF_LOCKED)) + if (!__wt_atomic_casv32(&ref->state, WT_REF_DELETED, WT_REF_LOCKED)) return (0); skip = (ref->page_del == NULL || diff --git a/src/btree/bt_page.c b/src/btree/bt_page.c index d97a0280e4b..414f7c88ff7 100644 --- a/src/btree/bt_page.c +++ b/src/btree/bt_page.c @@ -326,8 +326,8 @@ err: if ((pindex = WT_INTL_INDEX_GET_SAFE(page)) != NULL) { /* Increment the cache statistics. */ __wt_cache_page_inmem_incr(session, page, size); - (void)__wt_atomic_add8(&cache->bytes_read, size); - (void)__wt_atomic_add8(&cache->pages_inmem, 1); + (void)__wt_atomic_add64(&cache->bytes_read, size); + (void)__wt_atomic_add64(&cache->pages_inmem, 1); *pagep = page; return (0); diff --git a/src/btree/bt_read.c b/src/btree/bt_read.c index 4975177f3c3..a3ce39b7758 100644 --- a/src/btree/bt_read.c +++ b/src/btree/bt_read.c @@ -35,9 +35,9 @@ __wt_cache_read(WT_SESSION_IMPL *session, WT_REF *ref) * WT_REF_LOCKED, for deleted pages. If successful, we've won the * race, read the page. */ - if (__wt_atomic_casv4(&ref->state, WT_REF_DISK, WT_REF_READING)) + if (__wt_atomic_casv32(&ref->state, WT_REF_DISK, WT_REF_READING)) previous_state = WT_REF_DISK; - else if (__wt_atomic_casv4(&ref->state, WT_REF_DELETED, WT_REF_LOCKED)) + else if (__wt_atomic_casv32(&ref->state, WT_REF_DELETED, WT_REF_LOCKED)) previous_state = WT_REF_DELETED; else return (0); diff --git a/src/btree/bt_split.c b/src/btree/bt_split.c index aac7e3a6f38..7ed0947410d 100644 --- a/src/btree/bt_split.c +++ b/src/btree/bt_split.c @@ -557,7 +557,7 @@ __split_deepen(WT_SESSION_IMPL *session, WT_PAGE *parent) */ WT_ASSERT(session, WT_INTL_INDEX_GET_SAFE(parent) == pindex); WT_INTL_INDEX_SET(parent, alloc_index); - split_gen = __wt_atomic_addv8(&S2C(session)->split_gen, 1); + split_gen = __wt_atomic_addv64(&S2C(session)->split_gen, 1); panic = 1; #ifdef HAVE_DIAGNOSTIC @@ -933,7 +933,7 @@ __split_parent(WT_SESSION_IMPL *session, WT_REF *ref, WT_ASSERT(session, next_ref->state != WT_REF_SPLIT); if (next_ref->state == WT_REF_DELETED && __wt_delete_page_skip(session, next_ref) && - __wt_atomic_casv4( + __wt_atomic_casv32( &next_ref->state, WT_REF_DELETED, WT_REF_SPLIT)) deleted_entries++; } @@ -994,7 +994,7 @@ __split_parent(WT_SESSION_IMPL *session, WT_REF *ref, */ WT_ASSERT(session, WT_INTL_INDEX_GET_SAFE(parent) == pindex); WT_INTL_INDEX_SET(parent, alloc_index); - split_gen = __wt_atomic_addv8(&S2C(session)->split_gen, 1); + split_gen = __wt_atomic_addv64(&S2C(session)->split_gen, 1); alloc_index = NULL; #ifdef HAVE_DIAGNOSTIC diff --git a/src/evict/evict_lru.c b/src/evict/evict_lru.c index a542cb89018..7827c3e6285 100644 --- a/src/evict/evict_lru.c +++ b/src/evict/evict_lru.c @@ -972,7 +972,7 @@ retry: while (slot < max_entries && ret == 0) { } else { if (incr) { WT_ASSERT(session, dhandle->session_inuse > 0); - (void)__wt_atomic_subi4( + (void)__wt_atomic_subi32( &dhandle->session_inuse, 1); incr = 0; } @@ -1017,7 +1017,7 @@ retry: while (slot < max_entries && ret == 0) { btree->evict_walk_skips = 0; prev_slot = slot; - (void)__wt_atomic_addi4(&dhandle->session_inuse, 1); + (void)__wt_atomic_addi32(&dhandle->session_inuse, 1); incr = 1; __wt_spin_unlock(session, &conn->dhandle_lock); dhandle_locked = 0; @@ -1052,7 +1052,7 @@ retry: while (slot < max_entries && ret 
== 0) { cache->evict_file_next = dhandle; WT_ASSERT(session, dhandle->session_inuse > 0); - (void)__wt_atomic_subi4(&dhandle->session_inuse, 1); + (void)__wt_atomic_subi32(&dhandle->session_inuse, 1); incr = 0; } @@ -1321,7 +1321,7 @@ __evict_get_ref( * multiple attempts to evict it. For pages that are already * being evicted, this operation will fail and we will move on. */ - if (!__wt_atomic_casv4( + if (!__wt_atomic_casv32( &evict->ref->state, WT_REF_MEM, WT_REF_LOCKED)) { __evict_list_clear(session, evict); continue; @@ -1331,7 +1331,7 @@ __evict_get_ref( * Increment the busy count in the btree handle to prevent it * from being closed under us. */ - (void)__wt_atomic_addv4(&evict->btree->evict_busy, 1); + (void)__wt_atomic_addv32(&evict->btree->evict_busy, 1); *btreep = evict->btree; *refp = evict->ref; @@ -1410,7 +1410,7 @@ __evict_page(WT_SESSION_IMPL *session, int is_server) WT_WITH_BTREE(session, btree, ret = __wt_evict_page(session, ref)); - (void)__wt_atomic_subv4(&btree->evict_busy, 1); + (void)__wt_atomic_subv32(&btree->evict_busy, 1); WT_RET(ret); diff --git a/src/include/btree.i b/src/include/btree.i index e34c42dc89a..a43a7c60d81 100644 --- a/src/include/btree.i +++ b/src/include/btree.i @@ -49,17 +49,17 @@ __wt_cache_page_inmem_incr(WT_SESSION_IMPL *session, WT_PAGE *page, size_t size) WT_ASSERT(session, size < WT_EXABYTE); cache = S2C(session)->cache; - (void)__wt_atomic_add8(&cache->bytes_inmem, size); - (void)__wt_atomic_add8(&page->memory_footprint, size); + (void)__wt_atomic_add64(&cache->bytes_inmem, size); + (void)__wt_atomic_add64(&page->memory_footprint, size); if (__wt_page_is_modified(page)) { - (void)__wt_atomic_add8(&cache->bytes_dirty, size); - (void)__wt_atomic_add8(&page->modify->bytes_dirty, size); + (void)__wt_atomic_add64(&cache->bytes_dirty, size); + (void)__wt_atomic_add64(&page->modify->bytes_dirty, size); } /* Track internal and overflow size in cache. 
*/ if (WT_PAGE_IS_INTERNAL(page)) - (void)__wt_atomic_add8(&cache->bytes_internal, size); + (void)__wt_atomic_add64(&cache->bytes_internal, size); else if (page->type == WT_PAGE_OVFL) - (void)__wt_atomic_add8(&cache->bytes_overflow, size); + (void)__wt_atomic_add64(&cache->bytes_overflow, size); } /* @@ -73,8 +73,8 @@ __wt_cache_page_inmem_incr(WT_SESSION_IMPL *session, WT_PAGE *page, size_t size) #ifdef HAVE_DIAGNOSTIC #define WT_CACHE_DECR(session, f, sz) do { \ static int __first = 1; \ - if (__wt_atomic_sub8(&f, sz) > WT_EXABYTE) { \ - (void)__wt_atomic_add8(&f, sz); \ + if (__wt_atomic_sub64(&f, sz) > WT_EXABYTE) { \ + (void)__wt_atomic_add64(&f, sz); \ if (__first) { \ __wt_errx(session, \ "%s underflow: decrementing %" WT_SIZET_FMT,\ @@ -85,8 +85,8 @@ __wt_cache_page_inmem_incr(WT_SESSION_IMPL *session, WT_PAGE *page, size_t size) } while (0) #else #define WT_CACHE_DECR(s, f, sz) do { \ - if (__wt_atomic_sub8(&f, sz) > WT_EXABYTE) \ - (void)__wt_atomic_add8(&f, sz); \ + if (__wt_atomic_sub64(&f, sz) > WT_EXABYTE) \ + (void)__wt_atomic_add64(&f, sz); \ } while (0) #endif @@ -128,7 +128,7 @@ __wt_cache_page_byte_dirty_decr( */ orig = page->modify->bytes_dirty; decr = WT_MIN(size, orig); - if (__wt_atomic_cas8( + if (__wt_atomic_cas64( &page->modify->bytes_dirty, orig, orig - decr)) { WT_CACHE_DECR(session, cache->bytes_dirty, decr); break; @@ -172,15 +172,15 @@ __wt_cache_dirty_incr(WT_SESSION_IMPL *session, WT_PAGE *page) size_t size; cache = S2C(session)->cache; - (void)__wt_atomic_add8(&cache->pages_dirty, 1); + (void)__wt_atomic_add64(&cache->pages_dirty, 1); /* * Take care to read the memory_footprint once in case we are racing * with updates. */ size = page->memory_footprint; - (void)__wt_atomic_add8(&cache->bytes_dirty, size); - (void)__wt_atomic_add8(&page->modify->bytes_dirty, size); + (void)__wt_atomic_add64(&cache->bytes_dirty, size); + (void)__wt_atomic_add64(&page->modify->bytes_dirty, size); } /* @@ -202,7 +202,7 @@ __wt_cache_dirty_decr(WT_SESSION_IMPL *session, WT_PAGE *page) "count went negative"); cache->pages_dirty = 0; } else - (void)__wt_atomic_sub8(&cache->pages_dirty, 1); + (void)__wt_atomic_sub64(&cache->pages_dirty, 1); modify = page->modify; if (modify != NULL && modify->bytes_dirty != 0) @@ -244,8 +244,8 @@ __wt_cache_page_evict(WT_SESSION_IMPL *session, WT_PAGE *page) } /* Update pages and bytes evicted. */ - (void)__wt_atomic_add8(&cache->bytes_evict, page->memory_footprint); - (void)__wt_atomic_add8(&cache->pages_evict, 1); + (void)__wt_atomic_add64(&cache->bytes_evict, page->memory_footprint); + (void)__wt_atomic_add64(&cache->pages_evict, 1); } /* @@ -306,7 +306,7 @@ __wt_page_only_modify_set(WT_SESSION_IMPL *session, WT_PAGE *page) * Every time the page transitions from clean to dirty, update the cache * and transactional information. */ - if (__wt_atomic_add4(&page->modify->write_gen, 1) == 1) { + if (__wt_atomic_add32(&page->modify->write_gen, 1) == 1) { __wt_cache_dirty_incr(session, page); /* @@ -1059,14 +1059,14 @@ __wt_page_release_evict(WT_SESSION_IMPL *session, WT_REF *ref) * reference without first locking the page, it could be evicted in * between. */ - locked = __wt_atomic_casv4(&ref->state, WT_REF_MEM, WT_REF_LOCKED); + locked = __wt_atomic_casv32(&ref->state, WT_REF_MEM, WT_REF_LOCKED); if ((ret = __wt_hazard_clear(session, page)) != 0 || !locked) { if (locked) ref->state = WT_REF_MEM; return (ret == 0 ? 
EBUSY : ret); } - (void)__wt_atomic_addv4(&btree->evict_busy, 1); + (void)__wt_atomic_addv32(&btree->evict_busy, 1); too_big = (page->memory_footprint > btree->maxmempage) ? 1 : 0; if ((ret = __wt_evict_page(session, ref)) == 0) { @@ -1083,7 +1083,7 @@ __wt_page_release_evict(WT_SESSION_IMPL *session, WT_REF *ref) } else WT_STAT_FAST_CONN_INCR(session, cache_eviction_force_fail); - (void)__wt_atomic_subv4(&btree->evict_busy, 1); + (void)__wt_atomic_subv32(&btree->evict_busy, 1); return (ret); } diff --git a/src/include/cursor.i b/src/include/cursor.i index 8c743a259dc..484af0b4a58 100644 --- a/src/include/cursor.i +++ b/src/include/cursor.i @@ -150,7 +150,7 @@ __wt_cursor_dhandle_incr_use(WT_SESSION_IMPL *session) dhandle = session->dhandle; /* If we open a handle with a time of death set, clear it. */ - if (__wt_atomic_addi4(&dhandle->session_inuse, 1) == 1 && + if (__wt_atomic_addi32(&dhandle->session_inuse, 1) == 1 && dhandle->timeofdeath != 0) dhandle->timeofdeath = 0; } @@ -168,7 +168,7 @@ __wt_cursor_dhandle_decr_use(WT_SESSION_IMPL *session) /* If we close a handle with a time of death set, clear it. */ WT_ASSERT(session, dhandle->session_inuse > 0); - if (__wt_atomic_subi4(&dhandle->session_inuse, 1) == 0 && + if (__wt_atomic_subi32(&dhandle->session_inuse, 1) == 0 && dhandle->timeofdeath != 0) dhandle->timeofdeath = 0; } diff --git a/src/include/gcc.h b/src/include/gcc.h index 20709b52b94..33b8f373093 100644 --- a/src/include/gcc.h +++ b/src/include/gcc.h @@ -129,16 +129,16 @@ __wt_atomic_cas##name(type *vp, type old, type new) \ return (WT_ATOMIC_CAS(vp, old, new)); \ } -WT_ATOMIC_FUNC(1, uint8_t, uint8_t) -WT_ATOMIC_FUNC(2, uint16_t, uint16_t) -WT_ATOMIC_FUNC(4, uint32_t, uint32_t) -WT_ATOMIC_FUNC(v4, uint32_t, volatile uint32_t) -WT_ATOMIC_FUNC(i4, int32_t, int32_t) -WT_ATOMIC_FUNC(iv4, int32_t, volatile int32_t) -WT_ATOMIC_FUNC(8, uint64_t, uint64_t) -WT_ATOMIC_FUNC(v8, uint64_t, volatile uint64_t) -WT_ATOMIC_FUNC(i8, int64_t, int64_t) -WT_ATOMIC_FUNC(iv8, int64_t, volatile int64_t) +WT_ATOMIC_FUNC(8, uint8_t, uint8_t) +WT_ATOMIC_FUNC(16, uint16_t, uint16_t) +WT_ATOMIC_FUNC(32, uint32_t, uint32_t) +WT_ATOMIC_FUNC(v32, uint32_t, volatile uint32_t) +WT_ATOMIC_FUNC(i32, int32_t, int32_t) +WT_ATOMIC_FUNC(iv32, int32_t, volatile int32_t) +WT_ATOMIC_FUNC(64, uint64_t, uint64_t) +WT_ATOMIC_FUNC(v64, uint64_t, volatile uint64_t) +WT_ATOMIC_FUNC(i64, int64_t, int64_t) +WT_ATOMIC_FUNC(iv64, int64_t, volatile int64_t) /* * __wt_atomic_cas_ptr -- diff --git a/src/include/hardware.h b/src/include/hardware.h index 5bb68d1227b..335663fc651 100644 --- a/src/include/hardware.h +++ b/src/include/hardware.h @@ -33,7 +33,7 @@ uint8_t __orig; \ do { \ __orig = (p)->flags_atomic; \ - } while (!__wt_atomic_cas1( \ + } while (!__wt_atomic_cas8( \ &(p)->flags_atomic, __orig, __orig | (uint8_t)(mask))); \ } while (0) @@ -46,7 +46,7 @@ ret = EBUSY; \ break; \ } \ - } while (!__wt_atomic_cas1( \ + } while (!__wt_atomic_cas8( \ &(p)->flags_atomic, __orig, __orig | (uint8_t)(mask))); \ } while (0) @@ -54,7 +54,7 @@ uint8_t __orig; \ do { \ __orig = (p)->flags_atomic; \ - } while (!__wt_atomic_cas1( \ + } while (!__wt_atomic_cas8( \ &(p)->flags_atomic, __orig, __orig & ~(uint8_t)(mask))); \ } while (0) diff --git a/src/include/lint.h b/src/include/lint.h index 6ee6e7b7752..dae5918380a 100644 --- a/src/include/lint.h +++ b/src/include/lint.h @@ -59,16 +59,16 @@ __wt_atomic_cas##name(type *vp, type old, type new) \ return (0); \ } -WT_ATOMIC_FUNC(1, uint8_t, uint8_t) -WT_ATOMIC_FUNC(2, uint16_t, uint16_t) 
-WT_ATOMIC_FUNC(4, uint32_t, uint32_t) -WT_ATOMIC_FUNC(v4, uint32_t, volatile uint32_t) -WT_ATOMIC_FUNC(i4, int32_t, int32_t) -WT_ATOMIC_FUNC(iv4, int32_t, volatile int32_t) -WT_ATOMIC_FUNC(8, uint64_t, uint64_t) -WT_ATOMIC_FUNC(v8, uint64_t, volatile uint64_t) -WT_ATOMIC_FUNC(i8, int64_t, int64_t) -WT_ATOMIC_FUNC(iv8, int64_t, volatile int64_t) +WT_ATOMIC_FUNC(8, uint8_t, uint8_t) +WT_ATOMIC_FUNC(16, uint16_t, uint16_t) +WT_ATOMIC_FUNC(32, uint32_t, uint32_t) +WT_ATOMIC_FUNC(v32, uint32_t, volatile uint32_t) +WT_ATOMIC_FUNC(i32, int32_t, int32_t) +WT_ATOMIC_FUNC(iv32, int32_t, volatile int32_t) +WT_ATOMIC_FUNC(64, uint64_t, uint64_t) +WT_ATOMIC_FUNC(v64, uint64_t, volatile uint64_t) +WT_ATOMIC_FUNC(i64, int64_t, int64_t) +WT_ATOMIC_FUNC(iv64, int64_t, volatile int64_t) /* * __wt_atomic_cas_ptr -- diff --git a/src/include/msvc.h b/src/include/msvc.h index c9d2c1e8382..3066e33c660 100644 --- a/src/include/msvc.h +++ b/src/include/msvc.h @@ -59,16 +59,16 @@ __wt_atomic_cas##name(type *vp, type old, type new) \ ((t *)(vp), (t)(new), (t)(old)) == (t)(old)); \ } -WT_ATOMIC_FUNC(1, uint8_t, uint8_t, 8, char) -WT_ATOMIC_FUNC(2, uint16_t, uint16_t, 16, short) -WT_ATOMIC_FUNC(4, uint32_t, uint32_t, , long) -WT_ATOMIC_FUNC(v4, uint32_t, volatile uint32_t, , long) -WT_ATOMIC_FUNC(i4, int32_t, int32_t, , long) -WT_ATOMIC_FUNC(iv4, int32_t, volatile int32_t, , long) -WT_ATOMIC_FUNC(8, uint64_t, uint64_t, 64, __int64) -WT_ATOMIC_FUNC(v8, uint64_t, volatile uint64_t, 64, __int64) -WT_ATOMIC_FUNC(i8, int64_t, int64_t, 64, __int64) -WT_ATOMIC_FUNC(iv8, int64_t, volatile int64_t, 64, __int64) +WT_ATOMIC_FUNC(8, uint8_t, uint8_t, 8, char) +WT_ATOMIC_FUNC(16, uint16_t, uint16_t, 16, short) +WT_ATOMIC_FUNC(32, uint32_t, uint32_t, , long) +WT_ATOMIC_FUNC(v32, uint32_t, volatile uint32_t, , long) +WT_ATOMIC_FUNC(i32, int32_t, int32_t, , long) +WT_ATOMIC_FUNC(iv32, int32_t, volatile int32_t, , long) +WT_ATOMIC_FUNC(64, uint64_t, uint64_t, 64, __int64) +WT_ATOMIC_FUNC(v64, uint64_t, volatile uint64_t, 64, __int64) +WT_ATOMIC_FUNC(i64, int64_t, int64_t, 64, __int64) +WT_ATOMIC_FUNC(iv64, int64_t, volatile int64_t, 64, __int64) /* * __wt_atomic_cas_ptr -- diff --git a/src/include/stat.h b/src/include/stat.h index dfb3704a2fb..3592c021919 100644 --- a/src/include/stat.h +++ b/src/include/stat.h @@ -17,11 +17,11 @@ struct __wt_stats { #define WT_STAT(stats, fld) \ ((stats)->fld.v) #define WT_STAT_ATOMIC_DECRV(stats, fld, value) do { \ - (void)__wt_atomic_sub8(&WT_STAT(stats, fld), (value)); \ + (void)__wt_atomic_sub64(&WT_STAT(stats, fld), (value)); \ } while (0) #define WT_STAT_ATOMIC_DECR(stats, fld) WT_STAT_ATOMIC_DECRV(stats, fld, 1) #define WT_STAT_ATOMIC_INCRV(stats, fld, value) do { \ - (void)__wt_atomic_add8(&WT_STAT(stats, fld), (value)); \ + (void)__wt_atomic_add64(&WT_STAT(stats, fld), (value)); \ } while (0) #define WT_STAT_ATOMIC_INCR(stats, fld) WT_STAT_ATOMIC_INCRV(stats, fld, 1) #define WT_STAT_DECRV(stats, fld, value) do { \ diff --git a/src/include/txn.i b/src/include/txn.i index 00b3de77734..1228893871f 100644 --- a/src/include/txn.i +++ b/src/include/txn.i @@ -300,7 +300,7 @@ __wt_txn_new_id(WT_SESSION_IMPL *session) * global current ID, so we want post-increment semantics. Our atomic * add primitive does pre-increment, so adjust the result here. 
*/ - return (__wt_atomic_addv8(&S2C(session)->txn_global.current, 1) - 1); + return (__wt_atomic_addv64(&S2C(session)->txn_global.current, 1) - 1); } /* @@ -376,7 +376,7 @@ __wt_txn_id_check(WT_SESSION_IMPL *session) */ do { txn_state->id = txn->id = txn_global->current; - } while (!__wt_atomic_casv8( + } while (!__wt_atomic_casv64( &txn_global->current, txn->id, txn->id + 1) || WT_TXNID_LT(txn->id, txn_global->last_running)); diff --git a/src/log/log.c b/src/log/log.c index 42461844df8..26ba34c7f93 100644 --- a/src/log/log.c +++ b/src/log/log.c @@ -791,7 +791,7 @@ __wt_log_allocfile( */ WT_RET(__wt_scr_alloc(session, 0, &from_path)); WT_ERR(__wt_scr_alloc(session, 0, &to_path)); - tmp_id = __wt_atomic_add4(&log->tmp_fileid, 1); + tmp_id = __wt_atomic_add32(&log->tmp_fileid, 1); WT_ERR(__log_filename(session, tmp_id, WT_LOG_TMPNAME, from_path)); WT_ERR(__log_filename(session, lognum, dest, to_path)); /* diff --git a/src/log/log_slot.c b/src/log/log_slot.c index 611dc1b858d..404cec7d0bd 100644 --- a/src/log/log_slot.c +++ b/src/log/log_slot.c @@ -168,7 +168,7 @@ join_slot: * We lost a race to add our size into this slot. Check the state * and try again. */ - if (!__wt_atomic_casiv8(&slot->slot_state, old_state, new_state)) { + if (!__wt_atomic_casiv64(&slot->slot_state, old_state, new_state)) { WT_STAT_FAST_CONN_INCR(session, log_slot_races); goto join_slot; } @@ -248,7 +248,7 @@ __wt_log_slot_close(WT_SESSION_IMPL *session, WT_LOGSLOT *slot) newslot->slot_index = slot->slot_index; log->slot_array[newslot->slot_index] = newslot; old_state = - __wt_atomic_storeiv8(&slot->slot_state, WT_LOG_SLOT_PENDING); + __wt_atomic_storeiv64(&slot->slot_state, WT_LOG_SLOT_PENDING); slot->slot_group_size = (uint64_t)(old_state - WT_LOG_SLOT_READY); /* * Note that this statistic may be much bigger than in reality, @@ -308,7 +308,7 @@ __wt_log_slot_release(WT_LOGSLOT *slot, uint64_t size) * Add my size into the state. When it reaches WT_LOG_SLOT_DONE * all participatory threads have completed copying their piece. */ - return (__wt_atomic_addiv8(&slot->slot_state, (int64_t)size)); + return (__wt_atomic_addiv64(&slot->slot_state, (int64_t)size)); } /* diff --git a/src/lsm/lsm_manager.c b/src/lsm/lsm_manager.c index bf28e28804c..6c59232b619 100644 --- a/src/lsm/lsm_manager.c +++ b/src/lsm/lsm_manager.c @@ -258,7 +258,7 @@ __wt_lsm_manager_free_work_unit( if (entry != NULL) { WT_ASSERT(session, entry->lsm_tree->queue_ref > 0); - (void)__wt_atomic_sub4(&entry->lsm_tree->queue_ref, 1); + (void)__wt_atomic_sub32(&entry->lsm_tree->queue_ref, 1); __wt_free(session, entry); } } @@ -639,9 +639,9 @@ __wt_lsm_manager_push_entry(WT_SESSION_IMPL *session, * on close, the flag is cleared and then the queue reference count * is checked. */ - (void)__wt_atomic_add4(&lsm_tree->queue_ref, 1); + (void)__wt_atomic_add32(&lsm_tree->queue_ref, 1); if (!F_ISSET(lsm_tree, WT_LSM_TREE_ACTIVE)) { - (void)__wt_atomic_sub4(&lsm_tree->queue_ref, 1); + (void)__wt_atomic_sub32(&lsm_tree->queue_ref, 1); return (0); } @@ -668,6 +668,6 @@ __wt_lsm_manager_push_entry(WT_SESSION_IMPL *session, return (0); err: if (!pushed) - (void)__wt_atomic_sub4(&lsm_tree->queue_ref, 1); + (void)__wt_atomic_sub32(&lsm_tree->queue_ref, 1); return (ret); } diff --git a/src/lsm/lsm_merge.c b/src/lsm/lsm_merge.c index 21d8caf6964..40991f845e4 100644 --- a/src/lsm/lsm_merge.c +++ b/src/lsm/lsm_merge.c @@ -398,7 +398,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id) locked = 0; /* Allocate an ID for the merge. 
*/ - dest_id = __wt_atomic_add4(&lsm_tree->last, 1); + dest_id = __wt_atomic_add32(&lsm_tree->last, 1); /* * We only want to do the chunk loop if we're running with verbose, @@ -493,7 +493,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id) * merge_syncing field so that compact knows it is still in * progress. */ - (void)__wt_atomic_add4(&lsm_tree->merge_syncing, 1); + (void)__wt_atomic_add32(&lsm_tree->merge_syncing, 1); in_sync = 1; /* * We've successfully created the new chunk. Now install it. We need @@ -544,7 +544,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id) WT_TRET(dest->close(dest)); dest = NULL; ++lsm_tree->merge_progressing; - (void)__wt_atomic_sub4(&lsm_tree->merge_syncing, 1); + (void)__wt_atomic_sub32(&lsm_tree->merge_syncing, 1); in_sync = 0; WT_ERR_NOTFOUND_OK(ret); @@ -600,7 +600,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id) err: if (locked) WT_TRET(__wt_lsm_tree_writeunlock(session, lsm_tree)); if (in_sync) - (void)__wt_atomic_sub4(&lsm_tree->merge_syncing, 1); + (void)__wt_atomic_sub32(&lsm_tree->merge_syncing, 1); if (src != NULL) WT_TRET(src->close(src)); if (dest != NULL) diff --git a/src/lsm/lsm_tree.c b/src/lsm/lsm_tree.c index 28230a5e35e..f34f0598261 100644 --- a/src/lsm/lsm_tree.c +++ b/src/lsm/lsm_tree.c @@ -141,7 +141,7 @@ __wt_lsm_tree_close_all(WT_SESSION_IMPL *session) * is no need to decrement the reference count since discard * is unconditional. */ - (void)__wt_atomic_add4(&lsm_tree->refcnt, 1); + (void)__wt_atomic_add32(&lsm_tree->refcnt, 1); WT_TRET(__lsm_tree_close(session, lsm_tree)); WT_TRET(__lsm_tree_discard(session, lsm_tree, 1)); } @@ -486,17 +486,17 @@ __lsm_tree_find(WT_SESSION_IMPL *session, * Make sure we win the race to switch on the * exclusive flag. */ - if (!__wt_atomic_cas1( + if (!__wt_atomic_cas8( &lsm_tree->exclusive, 0, 1)) return (EBUSY); /* Make sure there are no readers */ - if (!__wt_atomic_cas4( + if (!__wt_atomic_cas32( &lsm_tree->refcnt, 0, 1)) { lsm_tree->exclusive = 0; return (EBUSY); } } else { - (void)__wt_atomic_add4(&lsm_tree->refcnt, 1); + (void)__wt_atomic_add32(&lsm_tree->refcnt, 1); /* * We got a reference, check if an exclusive @@ -505,7 +505,7 @@ __lsm_tree_find(WT_SESSION_IMPL *session, if (lsm_tree->exclusive) { WT_ASSERT(session, lsm_tree->refcnt > 0); - (void)__wt_atomic_sub4( + (void)__wt_atomic_sub32( &lsm_tree->refcnt, 1); return (EBUSY); } @@ -567,7 +567,7 @@ __lsm_tree_open(WT_SESSION_IMPL *session, WT_ASSERT(session, F_ISSET(session, WT_SESSION_LOCKED_HANDLE_LIST)); /* Start the LSM manager thread if it isn't running. */ - if (__wt_atomic_cas4(&conn->lsm_manager.lsm_workers, 0, 1)) + if (__wt_atomic_cas32(&conn->lsm_manager.lsm_workers, 0, 1)) WT_RET(__wt_lsm_manager_start(session)); /* Make sure no one beat us to it. */ @@ -646,7 +646,7 @@ __wt_lsm_tree_release(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree) WT_ASSERT(session, lsm_tree->refcnt > 0); if (lsm_tree->exclusive) lsm_tree->exclusive = 0; - (void)__wt_atomic_sub4(&lsm_tree->refcnt, 1); + (void)__wt_atomic_sub32(&lsm_tree->refcnt, 1); } /* How aggressively to ramp up or down throttle due to level 0 merging */ @@ -841,7 +841,7 @@ __wt_lsm_tree_switch(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree) /* Update the throttle time. 
*/ __wt_lsm_tree_throttle(session, lsm_tree, 0); - new_id = __wt_atomic_add4(&lsm_tree->last, 1); + new_id = __wt_atomic_add32(&lsm_tree->last, 1); WT_ERR(__wt_realloc_def(session, &lsm_tree->chunk_alloc, nchunks + 1, &lsm_tree->chunk)); @@ -1099,7 +1099,7 @@ __wt_lsm_tree_truncate( /* Create the new chunk. */ WT_ERR(__wt_calloc_one(session, &chunk)); - chunk->id = __wt_atomic_add4(&lsm_tree->last, 1); + chunk->id = __wt_atomic_add32(&lsm_tree->last, 1); WT_ERR(__wt_lsm_tree_setup_chunk(session, lsm_tree, chunk)); /* Mark all chunks old. */ @@ -1285,7 +1285,7 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip) * If we have a chunk, we want to look for it to be on-disk. * So we need to add a reference to keep it available. */ - (void)__wt_atomic_add4(&chunk->refcnt, 1); + (void)__wt_atomic_add32(&chunk->refcnt, 1); ref = 1; } @@ -1333,7 +1333,7 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip) "Start compacting progress %" PRIu64, name, chunk->id, lsm_tree->merge_progressing)); - (void)__wt_atomic_sub4(&chunk->refcnt, 1); + (void)__wt_atomic_sub32(&chunk->refcnt, 1); flushing = ref = 0; compacting = 1; F_SET(lsm_tree, WT_LSM_TREE_COMPACTING); @@ -1387,7 +1387,7 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip) err: /* Ensure anything we set is cleared. */ if (ref) - (void)__wt_atomic_sub4(&chunk->refcnt, 1); + (void)__wt_atomic_sub32(&chunk->refcnt, 1); if (compacting) { F_CLR(lsm_tree, WT_LSM_TREE_COMPACTING); lsm_tree->merge_aggressiveness = 0; diff --git a/src/lsm/lsm_work_unit.c b/src/lsm/lsm_work_unit.c index 4e3c311ca94..0c36c68e9f5 100644 --- a/src/lsm/lsm_work_unit.c +++ b/src/lsm/lsm_work_unit.c @@ -53,7 +53,7 @@ __lsm_copy_chunks(WT_SESSION_IMPL *session, * it's safe. */ for (i = 0; i < nchunks; i++) - (void)__wt_atomic_add4(&cookie->chunk_array[i]->refcnt, 1); + (void)__wt_atomic_add32(&cookie->chunk_array[i]->refcnt, 1); err: WT_TRET(__wt_lsm_tree_readunlock(session, lsm_tree)); @@ -122,7 +122,7 @@ __wt_lsm_get_chunk_to_flush(WT_SESSION_IMPL *session, force ? " w/ force" : "", i, lsm_tree->nchunks, chunk->uri)); - (void)__wt_atomic_add4(&chunk->refcnt, 1); + (void)__wt_atomic_add32(&chunk->refcnt, 1); } err: WT_RET(__wt_lsm_tree_readunlock(session, lsm_tree)); @@ -145,7 +145,7 @@ __lsm_unpin_chunks(WT_SESSION_IMPL *session, WT_LSM_WORKER_COOKIE *cookie) if (cookie->chunk_array[i] == NULL) continue; WT_ASSERT(session, cookie->chunk_array[i]->refcnt > 0); - (void)__wt_atomic_sub4(&cookie->chunk_array[i]->refcnt, 1); + (void)__wt_atomic_sub32(&cookie->chunk_array[i]->refcnt, 1); } /* Ensure subsequent calls don't double decrement. */ cookie->nchunks = 0; @@ -223,7 +223,7 @@ __wt_lsm_work_bloom(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree) * See if we win the race to switch on the "busy" flag and * recheck that the chunk still needs a Bloom filter. */ - if (__wt_atomic_cas4(&chunk->bloom_busy, 0, 1)) { + if (__wt_atomic_cas32(&chunk->bloom_busy, 0, 1)) { if (!F_ISSET(chunk, WT_LSM_CHUNK_BLOOM)) { ret = __lsm_bloom_create( session, lsm_tree, chunk, (u_int)i); @@ -528,7 +528,7 @@ __wt_lsm_free_chunks(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree) * Make sure only a single thread is freeing the old chunk array * at any time. 
*/ - if (!__wt_atomic_cas4(&lsm_tree->freeing_old_chunks, 0, 1)) + if (!__wt_atomic_cas32(&lsm_tree->freeing_old_chunks, 0, 1)) return (0); /* * Take a copy of the current state of the LSM tree and look for chunks diff --git a/src/lsm/lsm_worker.c b/src/lsm/lsm_worker.c index a041ab9b189..3add3155e17 100644 --- a/src/lsm/lsm_worker.c +++ b/src/lsm/lsm_worker.c @@ -65,7 +65,7 @@ __lsm_worker_general_op( ret = __wt_lsm_checkpoint_chunk( session, entry->lsm_tree, chunk); WT_ASSERT(session, chunk->refcnt > 0); - (void)__wt_atomic_sub4(&chunk->refcnt, 1); + (void)__wt_atomic_sub32(&chunk->refcnt, 1); WT_ERR(ret); } } else if (entry->type == WT_LSM_WORK_DROP) diff --git a/src/os_posix/os_mtx_cond.c b/src/os_posix/os_mtx_cond.c index 7c17e8a6fcf..baf9b475777 100644 --- a/src/os_posix/os_mtx_cond.c +++ b/src/os_posix/os_mtx_cond.c @@ -54,7 +54,7 @@ __wt_cond_wait(WT_SESSION_IMPL *session, WT_CONDVAR *cond, uint64_t usecs) locked = 0; /* Fast path if already signalled. */ - if (__wt_atomic_addi4(&cond->waiters, 1) == 0) + if (__wt_atomic_addi32(&cond->waiters, 1) == 0) return (0); /* @@ -91,7 +91,7 @@ __wt_cond_wait(WT_SESSION_IMPL *session, WT_CONDVAR *cond, uint64_t usecs) ret == ETIMEDOUT) ret = 0; - (void)__wt_atomic_subi4(&cond->waiters, 1); + (void)__wt_atomic_subi32(&cond->waiters, 1); err: if (locked) WT_TRET(pthread_mutex_unlock(&cond->mtx)); @@ -124,7 +124,7 @@ __wt_cond_signal(WT_SESSION_IMPL *session, WT_CONDVAR *cond) if (cond->waiters == -1) return (0); - if (cond->waiters > 0 || !__wt_atomic_casi4(&cond->waiters, 0, -1)) { + if (cond->waiters > 0 || !__wt_atomic_casi32(&cond->waiters, 0, -1)) { WT_ERR(pthread_mutex_lock(&cond->mtx)); locked = 1; WT_ERR(pthread_cond_broadcast(&cond->cond)); diff --git a/src/os_posix/os_mtx_rw.c b/src/os_posix/os_mtx_rw.c index f11476f8cfd..d47ab197643 100644 --- a/src/os_posix/os_mtx_rw.c +++ b/src/os_posix/os_mtx_rw.c @@ -165,7 +165,7 @@ __wt_try_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock) * incrementing the reader value to match it. */ new.s.readers = new.s.users = old.s.users + 1; - return (__wt_atomic_cas8(&l->u, old.u, new.u) ? 0 : EBUSY); + return (__wt_atomic_cas64(&l->u, old.u, new.u) ? 0 : EBUSY); } /* @@ -190,7 +190,7 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock) * value will wrap and two lockers will simultaneously be granted the * lock. */ - ticket = __wt_atomic_fetch_add2(&l->s.users, 1); + ticket = __wt_atomic_fetch_add16(&l->s.users, 1); for (pause_cnt = 0; ticket != l->s.readers;) { /* * We failed to get the lock; pause before retrying and if we've @@ -234,7 +234,7 @@ __wt_readunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock) * Increment the writers value (other readers are doing the same, make * sure we don't race). */ - (void)__wt_atomic_add2(&l->s.writers, 1); + (void)__wt_atomic_add16(&l->s.writers, 1); return (0); } @@ -267,7 +267,7 @@ __wt_try_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock) /* The replacement lock value is a result of allocating a new ticket. */ ++new.s.users; - return (__wt_atomic_cas8(&l->u, old.u, new.u) ? 0 : EBUSY); + return (__wt_atomic_cas64(&l->u, old.u, new.u) ? 0 : EBUSY); } /* @@ -292,7 +292,7 @@ __wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock) * value will wrap and two lockers will simultaneously be granted the * lock. 
*/ - ticket = __wt_atomic_fetch_add2(&l->s.users, 1); + ticket = __wt_atomic_fetch_add16(&l->s.users, 1); for (pause_cnt = 0; ticket != l->s.writers;) { /* * We failed to get the lock; pause before retrying and if we've diff --git a/src/os_posix/os_open.c b/src/os_posix/os_open.c index b7bbf3614c8..8622bb5b4ca 100644 --- a/src/os_posix/os_open.c +++ b/src/os_posix/os_open.c @@ -177,7 +177,7 @@ setupfh: } if (!matched) { WT_CONN_FILE_INSERT(conn, fh, bucket); - (void)__wt_atomic_add4(&conn->open_file_count, 1); + (void)__wt_atomic_add32(&conn->open_file_count, 1); *fhp = fh; } __wt_spin_unlock(session, &conn->fh_lock); @@ -222,7 +222,7 @@ __wt_close(WT_SESSION_IMPL *session, WT_FH **fhp) /* Remove from the list. */ bucket = fh->name_hash % WT_HASH_ARRAY_SIZE; WT_CONN_FILE_REMOVE(conn, fh, bucket); - (void)__wt_atomic_sub4(&conn->open_file_count, 1); + (void)__wt_atomic_sub32(&conn->open_file_count, 1); __wt_spin_unlock(session, &conn->fh_lock); diff --git a/src/os_win/os_mtx_cond.c b/src/os_win/os_mtx_cond.c index afc2051b359..565928cb863 100644 --- a/src/os_win/os_mtx_cond.c +++ b/src/os_win/os_mtx_cond.c @@ -51,7 +51,7 @@ __wt_cond_wait(WT_SESSION_IMPL *session, WT_CONDVAR *cond, uint64_t usecs) locked = 0; /* Fast path if already signalled. */ - if (__wt_atomic_addi4(&cond->waiters, 1) == 0) + if (__wt_atomic_addi32(&cond->waiters, 1) == 0) return (0); /* @@ -97,7 +97,7 @@ __wt_cond_wait(WT_SESSION_IMPL *session, WT_CONDVAR *cond, uint64_t usecs) } } - (void)__wt_atomic_subi4(&cond->waiters, 1); + (void)__wt_atomic_subi32(&cond->waiters, 1); if (locked) LeaveCriticalSection(&cond->mtx); @@ -130,7 +130,7 @@ __wt_cond_signal(WT_SESSION_IMPL *session, WT_CONDVAR *cond) if (cond->waiters == -1) return (0); - if (cond->waiters > 0 || !__wt_atomic_casi4(&cond->waiters, 0, -1)) { + if (cond->waiters > 0 || !__wt_atomic_casi32(&cond->waiters, 0, -1)) { EnterCriticalSection(&cond->mtx); locked = 1; WakeAllConditionVariable(&cond->cond); diff --git a/src/os_win/os_open.c b/src/os_win/os_open.c index 20fd1edb9c3..3bd24369242 100644 --- a/src/os_win/os_open.c +++ b/src/os_win/os_open.c @@ -169,7 +169,7 @@ setupfh: } if (!matched) { WT_CONN_FILE_INSERT(conn, fh, bucket); - (void)__wt_atomic_add4(&conn->open_file_count, 1); + (void)__wt_atomic_add32(&conn->open_file_count, 1); *fhp = fh; } @@ -217,7 +217,7 @@ __wt_close(WT_SESSION_IMPL *session, WT_FH **fhp) /* Remove from the list. */ bucket = fh->name_hash % WT_HASH_ARRAY_SIZE; WT_CONN_FILE_REMOVE(conn, fh, bucket); - (void)__wt_atomic_sub4(&conn->open_file_count, 1); + (void)__wt_atomic_sub32(&conn->open_file_count, 1); __wt_spin_unlock(session, &conn->fh_lock); diff --git a/src/reconcile/rec_write.c b/src/reconcile/rec_write.c index 7563fd46058..6b0ca54065e 100644 --- a/src/reconcile/rec_write.c +++ b/src/reconcile/rec_write.c @@ -1104,7 +1104,7 @@ __rec_child_modify(WT_SESSION_IMPL *session, * to see if the delete is visible to us. Lock down the * structure. */ - if (!__wt_atomic_casv4( + if (!__wt_atomic_casv32( &ref->state, WT_REF_DELETED, WT_REF_LOCKED)) break; ret = __rec_child_deleted(session, r, ref, statep); @@ -5108,7 +5108,7 @@ err: __wt_scr_free(session, &tkey); * write generation changed, the page has been written since * we started reconciliation and remains dirty. 
*/ - if (__wt_atomic_cas4(&mod->write_gen, r->orig_write_gen, 0)) + if (__wt_atomic_cas32(&mod->write_gen, r->orig_write_gen, 0)) __wt_cache_dirty_decr(session, page); } diff --git a/src/session/session_dhandle.c b/src/session/session_dhandle.c index 5b0d2c94868..875a6bb440d 100644 --- a/src/session/session_dhandle.c +++ b/src/session/session_dhandle.c @@ -276,7 +276,7 @@ __session_discard_btree( TAILQ_REMOVE(&session->dhandles, dhandle_cache, q); TAILQ_REMOVE(&session->dhhash[bucket], dhandle_cache, hashq); - (void)__wt_atomic_sub4(&dhandle_cache->dhandle->session_ref, 1); + (void)__wt_atomic_sub32(&dhandle_cache->dhandle->session_ref, 1); __wt_overwrite_and_free(session, dhandle_cache); } @@ -348,7 +348,7 @@ __session_dhandle_find_shared( WT_SESSION_IMPL *session, const char *uri, const char *checkpoint) { WT_RET(__wt_conn_dhandle_find(session, uri, checkpoint)); - (void)__wt_atomic_add4(&session->dhandle->session_ref, 1); + (void)__wt_atomic_add32(&session->dhandle->session_ref, 1); return (0); } /* diff --git a/src/txn/txn.c b/src/txn/txn.c index 96f2f72678b..3bd8ae03cfb 100644 --- a/src/txn/txn.c +++ b/src/txn/txn.c @@ -134,7 +134,7 @@ __wt_txn_get_snapshot(WT_SESSION_IMPL *session) if ((count = txn_global->scan_count) < 0) WT_PAUSE(); } while (count < 0 || - !__wt_atomic_casiv4(&txn_global->scan_count, count, count + 1)); + !__wt_atomic_casiv32(&txn_global->scan_count, count, count + 1)); current_id = snap_min = txn_global->current; prev_oldest_id = txn_global->oldest_id; @@ -147,7 +147,7 @@ __wt_txn_get_snapshot(WT_SESSION_IMPL *session) /* Check that the oldest ID has not moved in the meantime. */ if (prev_oldest_id == txn_global->oldest_id) { WT_ASSERT(session, txn_global->scan_count > 0); - (void)__wt_atomic_subiv4(&txn_global->scan_count, 1); + (void)__wt_atomic_subiv32(&txn_global->scan_count, 1); return; } } @@ -183,7 +183,7 @@ __wt_txn_get_snapshot(WT_SESSION_IMPL *session) txn_state->snap_min = snap_min; WT_ASSERT(session, txn_global->scan_count > 0); - (void)__wt_atomic_subiv4(&txn_global->scan_count, 1); + (void)__wt_atomic_subiv32(&txn_global->scan_count, 1); __txn_sort_snapshot(session, n, current_id); } @@ -237,7 +237,7 @@ __wt_txn_update_oldest(WT_SESSION_IMPL *session, int force) if ((count = txn_global->scan_count) < 0) WT_PAUSE(); } while (count < 0 || - !__wt_atomic_casiv4(&txn_global->scan_count, count, count + 1)); + !__wt_atomic_casiv32(&txn_global->scan_count, count, count + 1)); /* The oldest ID cannot change until the scan count goes to zero. */ prev_oldest_id = txn_global->oldest_id; @@ -288,7 +288,7 @@ __wt_txn_update_oldest(WT_SESSION_IMPL *session, int force) /* Update the oldest ID. */ if ((WT_TXNID_LT(prev_oldest_id, oldest_id) || last_running_moved) && - __wt_atomic_casiv4(&txn_global->scan_count, 1, -1)) { + __wt_atomic_casiv32(&txn_global->scan_count, 1, -1)) { WT_ORDERED_READ(session_cnt, conn->session_cnt); for (i = 0, s = txn_global->states; i < session_cnt; i++, s++) { if ((id = s->id) != WT_TXN_NONE && @@ -333,7 +333,7 @@ __wt_txn_update_oldest(WT_SESSION_IMPL *session, int force) oldest_session->txn.snap_min); } WT_ASSERT(session, txn_global->scan_count > 0); - (void)__wt_atomic_subiv4(&txn_global->scan_count, 1); + (void)__wt_atomic_subiv32(&txn_global->scan_count, 1); } } |
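At the narrowest width, the one-byte compare-and-swap used by the `hardware.h` flag macros is now spelled `__wt_atomic_cas8` (8 bits) instead of `__wt_atomic_cas1` (1 byte). A minimal sketch of that retry loop follows, assuming `__sync_bool_compare_and_swap` as the primitive; `struct page` here is a hypothetical stand-in, not the real WiredTiger page structure.

```c
#include <stdint.h>

/* Hypothetical holder of an atomically updated one-byte flag word. */
struct page {
	volatile uint8_t flags_atomic;
};

/*
 * Set bits in the shared flag byte, mirroring the shape of the
 * F_SET_ATOMIC macro in the hardware.h hunk: read the current value, OR
 * in the mask, and retry the one-byte CAS until it lands without racing
 * a concurrent updater.
 */
static inline void
page_flag_set_atomic(struct page *p, uint8_t mask)
{
	uint8_t orig;

	do {
		orig = p->flags_atomic;
	} while (!__sync_bool_compare_and_swap(
	    &p->flags_atomic, orig, (uint8_t)(orig | mask)));
}
```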
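The 16-bit fetch-and-add in the `os_mtx_rw.c` hunks (`__wt_atomic_fetch_add2`, now `__wt_atomic_fetch_add16`) hands out lock tickets; unlike the `add` wrappers it returns the value before the increment, which is exactly the ticket the caller needs, and the 16-bit width is what bounds the lock to 64K concurrent lockers. Below is a reduced ticket-lock sketch under those semantics, using the `__sync` builtins; the two-field layout is illustrative, not the real WT_RWLOCK.

```c
#include <stdint.h>

/*
 * Reduced ticket lock, not the real WT_RWLOCK: two 16-bit counters, one
 * handing out tickets and one naming the ticket currently being served.
 */
struct ticket_lock {
	volatile uint16_t users;	/* next ticket to hand out */
	volatile uint16_t current;	/* ticket now holding the lock */
};

static inline void
ticket_lock_acquire(struct ticket_lock *l)
{
	uint16_t ticket;

	/*
	 * __sync_fetch_and_add returns the pre-increment value, i.e. our
	 * ticket; with 16-bit counters at most 64K lockers can queue
	 * before the values wrap, as the os_mtx_rw.c comments note.
	 */
	ticket = __sync_fetch_and_add(&l->users, 1);
	while (l->current != ticket)
		;	/* spin; the real code pauses and yields here */
}

static inline void
ticket_lock_release(struct ticket_lock *l)
{
	(void)__sync_add_and_fetch(&l->current, 1);
}
```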
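The async work queue hunks show the 64-bit operations at work: producers in `async_op.c` claim a ring-buffer slot by atomically bumping `alloc_head` (`__wt_atomic_add64`), and workers in `async_worker.c` claim the next item by advancing `alloc_tail` with `__wt_atomic_cas64`. A stripped-down sketch of that pairing, with a hypothetical fixed-size queue and the `__sync` builtins assumed as primitives:

```c
#include <stdint.h>

#define	QSIZE	128		/* illustrative ring size */

/* Hypothetical slimmed-down work queue; not the real WT_ASYNC layout. */
struct work_queue {
	volatile uint64_t alloc_head;	/* next slot to fill */
	volatile uint64_t alloc_tail;	/* last slot consumed */
	void *slots[QSIZE];
};

/* Producer: atomically claim the next slot in the ring. */
static inline uint64_t
queue_reserve(struct work_queue *q)
{
	return (__sync_add_and_fetch(&q->alloc_head, 1) % QSIZE);
}

/*
 * Consumer: try to advance the tail past the last item seen consumed;
 * returns 0 if we won the race and own that item, -1 if another worker
 * got there first and the caller should retry.
 */
static inline int
queue_claim(struct work_queue *q, uint64_t last_consume)
{
	return (__sync_bool_compare_and_swap(
	    &q->alloc_tail, last_consume, last_consume + 1) ? 0 : -1);
}
```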
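Finally, the `txn.i` hunk is a reminder that the renamed add wrappers keep pre-increment semantics: `__wt_atomic_addv64` returns the incremented value, so the ID allocator subtracts one to hand out the counter's old value. A self-contained sketch of that adjustment, assuming `__sync_add_and_fetch` as the underlying primitive:

```c
#include <stdint.h>

/* Hypothetical global transaction-ID counter. */
static volatile uint64_t txn_current;

/*
 * Allocate a transaction ID.  As the txn.i hunk notes, the add wrappers
 * (approximated here with __sync_add_and_fetch) return the incremented
 * value, i.e. pre-increment semantics, so subtracting one yields the
 * post-increment behaviour the allocator wants: the caller receives the
 * counter's value from before the bump.
 */
static inline uint64_t
txn_new_id(void)
{
	return (__sync_add_and_fetch(&txn_current, 1) - 1);
}
```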