author     Michael Cahill <michael.cahill@mongodb.com>  2015-08-19 12:04:55 +1000
committer  Keith Bostic <keith@wiredtiger.com>          2015-08-19 11:45:31 -0400
commit     ba32b71f75c85dd6ff3558346d19154d037888ec (patch)
tree       3c3b90617c0a337f65d026bd13bc66ced3d0492d /src/lsm
parent     1c2542ff9337af935ce95a63c91cd2024694634e (diff)
download   mongo-ba32b71f75c85dd6ff3558346d19154d037888ec.tar.gz
Merge pull request #2112 from wiredtiger/wt-2025-inline-atomic-functions
WT-2025: inline atomic functions. (cherry picked from commit ada57c1ed44ab0dd4f904f60a63bbc77333b2baa)
Diffstat (limited to 'src/lsm')
-rw-r--r--  src/lsm/lsm_manager.c    4
-rw-r--r--  src/lsm/lsm_merge.c      8
-rw-r--r--  src/lsm/lsm_tree.c      31
-rw-r--r--  src/lsm/lsm_work_unit.c 10
-rw-r--r--  src/lsm/lsm_worker.c     2
5 files changed, 29 insertions, 26 deletions
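
The change replaces the old untyped WT_ATOMIC_ADD4/SUB4/CAS1/CAS4 macros with typed inline functions (__wt_atomic_add32, __wt_atomic_sub32, __wt_atomic_cas8, __wt_atomic_cas32) that take a pointer to the target rather than an lvalue, which is why every call site below gains an explicit '&'. As a rough sketch of the shape such wrappers take on GCC-style compilers using the __sync builtins (illustrative only, not necessarily WiredTiger's exact implementation):

    #include <stdint.h>

    /* Atomically add v to *vp; return the new value. */
    static inline uint32_t
    __wt_atomic_add32(uint32_t *vp, uint32_t v)
    {
            return (__sync_add_and_fetch(vp, v));
    }

    /* Atomically subtract v from *vp; return the new value. */
    static inline uint32_t
    __wt_atomic_sub32(uint32_t *vp, uint32_t v)
    {
            return (__sync_sub_and_fetch(vp, v));
    }

    /* Atomically set *vp to newv if *vp == old; return non-zero on success. */
    static inline int
    __wt_atomic_cas32(uint32_t *vp, uint32_t old, uint32_t newv)
    {
            return (__sync_bool_compare_and_swap(vp, old, newv));
    }

With a typed pointer argument the compiler rejects a mismatched operand size, where the old macros' numeric suffix (ADD4 for a 4-byte operand, CAS1 for 1 byte) was only a naming convention.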
diff --git a/src/lsm/lsm_manager.c b/src/lsm/lsm_manager.c
index 1ea41f24ee2..0e22af3b623 100644
--- a/src/lsm/lsm_manager.c
+++ b/src/lsm/lsm_manager.c
@@ -259,7 +259,7 @@ __wt_lsm_manager_free_work_unit(
if (entry != NULL) {
WT_ASSERT(session, entry->lsm_tree->queue_ref > 0);
- (void)WT_ATOMIC_SUB4(entry->lsm_tree->queue_ref, 1);
+ (void)__wt_atomic_sub32(&entry->lsm_tree->queue_ref, 1);
__wt_free(session, entry);
}
}
@@ -673,7 +673,7 @@ __wt_lsm_manager_push_entry(WT_SESSION_IMPL *session,
entry->type = type;
entry->flags = flags;
entry->lsm_tree = lsm_tree;
- (void)WT_ATOMIC_ADD4(lsm_tree->queue_ref, 1);
+ (void)__wt_atomic_add32(&lsm_tree->queue_ref, 1);
WT_STAT_FAST_CONN_INCR(session, lsm_work_units_created);
if (type == WT_LSM_WORK_SWITCH)
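
In lsm_manager.c the counter is queue_ref, the number of work units currently queued against the tree: the push path increments it, the free path decrements it, and the (void) casts document that the returned new value is deliberately ignored. The pairing, reduced to a sketch reusing the hypothetical wrappers above (struct fields invented for illustration):

    struct lsm_tree_s { uint32_t queue_ref; };

    static void
    push_entry(struct lsm_tree_s *t)
    {
            /* One reference per queued work unit. */
            (void)__wt_atomic_add32(&t->queue_ref, 1);
    }

    static void
    free_work_unit(struct lsm_tree_s *t)
    {
            /* Dropped when the unit is consumed or discarded. */
            (void)__wt_atomic_sub32(&t->queue_ref, 1);
    }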
diff --git a/src/lsm/lsm_merge.c b/src/lsm/lsm_merge.c
index 6ca1b0f04ab..de7ea37e498 100644
--- a/src/lsm/lsm_merge.c
+++ b/src/lsm/lsm_merge.c
@@ -284,7 +284,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
locked = 0;
/* Allocate an ID for the merge. */
- dest_id = WT_ATOMIC_ADD4(lsm_tree->last, 1);
+ dest_id = __wt_atomic_add32(&lsm_tree->last, 1);
/*
* We only want to do the chunk loop if we're running with verbose,
@@ -375,7 +375,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
* merge_syncing field so that compact knows it is still in
* progress.
*/
- (void)WT_ATOMIC_ADD4(lsm_tree->merge_syncing, 1);
+ (void)__wt_atomic_add32(&lsm_tree->merge_syncing, 1);
in_sync = 1;
/*
* We've successfully created the new chunk. Now install it. We need
@@ -426,7 +426,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
WT_TRET(dest->close(dest));
dest = NULL;
++lsm_tree->merge_progressing;
- (void)WT_ATOMIC_SUB4(lsm_tree->merge_syncing, 1);
+ (void)__wt_atomic_sub32(&lsm_tree->merge_syncing, 1);
in_sync = 0;
WT_ERR_NOTFOUND_OK(ret);
@@ -482,7 +482,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
err: if (locked)
WT_TRET(__wt_lsm_tree_writeunlock(session, lsm_tree));
if (in_sync)
- (void)WT_ATOMIC_SUB4(lsm_tree->merge_syncing, 1);
+ (void)__wt_atomic_sub32(&lsm_tree->merge_syncing, 1);
if (src != NULL)
WT_TRET(src->close(src));
if (dest != NULL)
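
Not every call site discards the result: __wt_lsm_merge allocates the destination chunk ID from the return value of the add, so concurrent merges each receive a distinct ID without further locking. The merge_syncing counter, by contrast, is a plain gate that (judging by the comment in the hunk) compact polls, and the in_sync flag ensures the error path decrements it exactly once. The ID-allocation idiom, sketched with the hypothetical wrapper above:

    /* Hand out unique, monotonically increasing chunk IDs. */
    static uint32_t
    alloc_chunk_id(uint32_t *last)
    {
            /*
             * add-and-fetch returns the post-increment value, so two
             * racing callers can never observe the same ID.
             */
            return (__wt_atomic_add32(last, 1));
    }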
diff --git a/src/lsm/lsm_tree.c b/src/lsm/lsm_tree.c
index 63f19858279..3c8f4d5750a 100644
--- a/src/lsm/lsm_tree.c
+++ b/src/lsm/lsm_tree.c
@@ -134,7 +134,7 @@ __wt_lsm_tree_close_all(WT_SESSION_IMPL *session)
* is no need to decrement the reference count since discard
* is unconditional.
*/
- (void)WT_ATOMIC_ADD4(lsm_tree->refcnt, 1);
+ (void)__wt_atomic_add32(&lsm_tree->refcnt, 1);
WT_TRET(__lsm_tree_close(session, lsm_tree));
WT_TRET(__lsm_tree_discard(session, lsm_tree, 1));
}
@@ -474,15 +474,17 @@ __lsm_tree_find(WT_SESSION_IMPL *session,
* Make sure we win the race to switch on the
* exclusive flag.
*/
- if (!WT_ATOMIC_CAS1(lsm_tree->exclusive, 0, 1))
+ if (!__wt_atomic_cas8(
+ &lsm_tree->exclusive, 0, 1))
return (EBUSY);
/* Make sure there are no readers */
- if (!WT_ATOMIC_CAS4(lsm_tree->refcnt, 0, 1)) {
+ if (!__wt_atomic_cas32(
+ &lsm_tree->refcnt, 0, 1)) {
lsm_tree->exclusive = 0;
return (EBUSY);
}
} else {
- (void)WT_ATOMIC_ADD4(lsm_tree->refcnt, 1);
+ (void)__wt_atomic_add32(&lsm_tree->refcnt, 1);
/*
* We got a reference, check if an exclusive
@@ -491,8 +493,8 @@ __lsm_tree_find(WT_SESSION_IMPL *session,
if (lsm_tree->exclusive) {
WT_ASSERT(session,
lsm_tree->refcnt > 0);
- (void)WT_ATOMIC_SUB4(
- lsm_tree->refcnt, 1);
+ (void)__wt_atomic_sub32(
+ &lsm_tree->refcnt, 1);
return (EBUSY);
}
}
@@ -553,7 +555,7 @@ __lsm_tree_open(WT_SESSION_IMPL *session,
WT_ASSERT(session, F_ISSET(session, WT_SESSION_HANDLE_LIST_LOCKED));
/* Start the LSM manager thread if it isn't running. */
- if (WT_ATOMIC_CAS4(conn->lsm_manager.lsm_workers, 0, 1))
+ if (__wt_atomic_cas32(&conn->lsm_manager.lsm_workers, 0, 1))
WT_RET(__wt_lsm_manager_start(session));
/* Make sure no one beat us to it. */
@@ -632,7 +634,7 @@ __wt_lsm_tree_release(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
WT_ASSERT(session, lsm_tree->refcnt > 0);
if (lsm_tree->exclusive)
lsm_tree->exclusive = 0;
- (void)WT_ATOMIC_SUB4(lsm_tree->refcnt, 1);
+ (void)__wt_atomic_sub32(&lsm_tree->refcnt, 1);
}
/* How aggressively to ramp up or down throttle due to level 0 merging */
@@ -827,7 +829,7 @@ __wt_lsm_tree_switch(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
/* Update the throttle time. */
__wt_lsm_tree_throttle(session, lsm_tree, 0);
- new_id = WT_ATOMIC_ADD4(lsm_tree->last, 1);
+ new_id = __wt_atomic_add32(&lsm_tree->last, 1);
WT_ERR(__wt_realloc_def(session, &lsm_tree->chunk_alloc,
nchunks + 1, &lsm_tree->chunk));
@@ -1085,7 +1087,7 @@ __wt_lsm_tree_truncate(
/* Create the new chunk. */
WT_ERR(__wt_calloc_one(session, &chunk));
- chunk->id = WT_ATOMIC_ADD4(lsm_tree->last, 1);
+ chunk->id = __wt_atomic_add32(&lsm_tree->last, 1);
WT_ERR(__wt_lsm_tree_setup_chunk(session, lsm_tree, chunk));
/* Mark all chunks old. */
@@ -1195,7 +1197,8 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip)
WT_LSM_TREE *lsm_tree;
time_t begin, end;
uint64_t progress;
- int i, compacting, flushing, locked, ref;
+ uint32_t i;
+ int compacting, flushing, locked, ref;
compacting = flushing = locked = ref = 0;
chunk = NULL;
@@ -1258,7 +1261,7 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip)
* If we have a chunk, we want to look for it to be on-disk.
* So we need to add a reference to keep it available.
*/
- (void)WT_ATOMIC_ADD4(chunk->refcnt, 1);
+ (void)__wt_atomic_add32(&chunk->refcnt, 1);
ref = 1;
}
@@ -1306,7 +1309,7 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip)
"Start compacting progress %" PRIu64,
name, chunk->id,
lsm_tree->merge_progressing));
- (void)WT_ATOMIC_SUB4(chunk->refcnt, 1);
+ (void)__wt_atomic_sub32(&chunk->refcnt, 1);
flushing = ref = 0;
compacting = 1;
F_SET(lsm_tree, WT_LSM_TREE_COMPACTING);
@@ -1360,7 +1363,7 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip)
err:
/* Ensure anything we set is cleared. */
if (ref)
- (void)WT_ATOMIC_SUB4(chunk->refcnt, 1);
+ (void)__wt_atomic_sub32(&chunk->refcnt, 1);
if (compacting) {
F_CLR(lsm_tree, WT_LSM_TREE_COMPACTING);
lsm_tree->merge_aggressiveness = 0;
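
The __lsm_tree_find hunks implement the subtlest pattern in this diff: an exclusive opener must win two races in order (first the exclusive byte, then a reader count of zero), while a shared opener optimistically takes a reference and backs it out if it then observes the exclusive flag. Reduced to a sketch with field names from the diff, simplified error handling, and an assumed 8-bit analogue of the 32-bit wrappers above:

    #include <errno.h>

    /* Assumed 8-bit analogue of __wt_atomic_cas32. */
    static inline int
    __wt_atomic_cas8(uint8_t *vp, uint8_t old, uint8_t newv)
    {
            return (__sync_bool_compare_and_swap(vp, old, newv));
    }

    struct tree_s { uint8_t exclusive; uint32_t refcnt; };

    static int
    tree_get(struct tree_s *t, int exclusive)
    {
            if (exclusive) {
                    /* Win the race to switch on the exclusive flag... */
                    if (!__wt_atomic_cas8(&t->exclusive, 0, 1))
                            return (EBUSY);
                    /* ...then make sure there are no readers. */
                    if (!__wt_atomic_cas32(&t->refcnt, 0, 1)) {
                            t->exclusive = 0;
                            return (EBUSY);
                    }
            } else {
                    /* Take a reference, then re-check for an exclusive opener. */
                    (void)__wt_atomic_add32(&t->refcnt, 1);
                    if (t->exclusive) {
                            (void)__wt_atomic_sub32(&t->refcnt, 1);
                            return (EBUSY);
                    }
            }
            return (0);
    }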
diff --git a/src/lsm/lsm_work_unit.c b/src/lsm/lsm_work_unit.c
index 4f5e1516f1c..3e0fd43d404 100644
--- a/src/lsm/lsm_work_unit.c
+++ b/src/lsm/lsm_work_unit.c
@@ -53,7 +53,7 @@ __lsm_copy_chunks(WT_SESSION_IMPL *session,
* it's safe.
*/
for (i = 0; i < nchunks; i++)
- (void)WT_ATOMIC_ADD4(cookie->chunk_array[i]->refcnt, 1);
+ (void)__wt_atomic_add32(&cookie->chunk_array[i]->refcnt, 1);
err: WT_TRET(__wt_lsm_tree_readunlock(session, lsm_tree));
@@ -122,7 +122,7 @@ __wt_lsm_get_chunk_to_flush(WT_SESSION_IMPL *session,
force ? " w/ force" : "",
i, lsm_tree->nchunks, chunk->uri));
- (void)WT_ATOMIC_ADD4(chunk->refcnt, 1);
+ (void)__wt_atomic_add32(&chunk->refcnt, 1);
}
err: WT_RET(__wt_lsm_tree_readunlock(session, lsm_tree));
@@ -145,7 +145,7 @@ __lsm_unpin_chunks(WT_SESSION_IMPL *session, WT_LSM_WORKER_COOKIE *cookie)
if (cookie->chunk_array[i] == NULL)
continue;
WT_ASSERT(session, cookie->chunk_array[i]->refcnt > 0);
- (void)WT_ATOMIC_SUB4(cookie->chunk_array[i]->refcnt, 1);
+ (void)__wt_atomic_sub32(&cookie->chunk_array[i]->refcnt, 1);
}
/* Ensure subsequent calls don't double decrement. */
cookie->nchunks = 0;
@@ -219,7 +219,7 @@ __wt_lsm_work_bloom(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
* See if we win the race to switch on the "busy" flag and
* recheck that the chunk still needs a Bloom filter.
*/
- if (WT_ATOMIC_CAS4(chunk->bloom_busy, 0, 1)) {
+ if (__wt_atomic_cas32(&chunk->bloom_busy, 0, 1)) {
if (!F_ISSET(chunk, WT_LSM_CHUNK_BLOOM)) {
ret = __lsm_bloom_create(
session, lsm_tree, chunk, (u_int)i);
@@ -541,7 +541,7 @@ __wt_lsm_free_chunks(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
* Make sure only a single thread is freeing the old chunk array
* at any time.
*/
- if (!WT_ATOMIC_CAS4(lsm_tree->freeing_old_chunks, 0, 1))
+ if (!__wt_atomic_cas32(&lsm_tree->freeing_old_chunks, 0, 1))
return (0);
/*
* Take a copy of the current state of the LSM tree and look for chunks
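
Both __wt_lsm_work_bloom and __wt_lsm_free_chunks use the 32-bit CAS as a try-lock: whichever thread flips the flag from 0 to 1 does the work, and everyone else returns immediately instead of blocking. The shape of the idiom (illustrative only; the real functions also clear the flag on every exit path):

    static uint32_t freeing_old_chunks;

    static int
    free_old_chunks(void)
    {
            /* Only one thread frees the old chunk array at a time. */
            if (!__wt_atomic_cas32(&freeing_old_chunks, 0, 1))
                    return (0);     /* Someone else is already on it. */

            /* ... drop chunks that are no longer required ... */

            freeing_old_chunks = 0; /* Open the gate for the next caller. */
            return (0);
    }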
diff --git a/src/lsm/lsm_worker.c b/src/lsm/lsm_worker.c
index d1272df763d..252523c5c57 100644
--- a/src/lsm/lsm_worker.c
+++ b/src/lsm/lsm_worker.c
@@ -65,7 +65,7 @@ __lsm_worker_general_op(
ret = __wt_lsm_checkpoint_chunk(
session, entry->lsm_tree, chunk);
WT_ASSERT(session, chunk->refcnt > 0);
- (void)WT_ATOMIC_SUB4(chunk->refcnt, 1);
+ (void)__wt_atomic_sub32(&chunk->refcnt, 1);
WT_ERR(ret);
}
} else if (entry->type == WT_LSM_WORK_DROP)