summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichael Cahill <michael.cahill@mongodb.com>2015-09-18 17:18:22 +1000
committerMichael Cahill <michael.cahill@mongodb.com>2015-09-18 17:18:22 +1000
commita1c0433ba23983604bccf2a881df5c4de90ada03 (patch)
treee8cc04fd0fe92877b00645840c14a45219ee2653
parentceadd8c1a5c22b7f8c7906b601589090345a7af9 (diff)
downloadmongo-a1c0433ba23983604bccf2a881df5c4de90ada03.tar.gz
Import wiredtiger-wiredtiger-mongodb-3.0.6-18-g3489b0b.tar.gz from wiredtiger branch mongodb-3.0
-rw-r--r--src/third_party/wiredtiger/bench/wtperf/wtperf.c6
-rw-r--r--src/third_party/wiredtiger/dist/api_data.py9
-rw-r--r--src/third_party/wiredtiger/dist/s_define.list22
-rw-r--r--src/third_party/wiredtiger/dist/s_string.ok2
-rw-r--r--src/third_party/wiredtiger/dist/stat_data.py1
-rw-r--r--src/third_party/wiredtiger/lang/java/java_doc.i1
-rw-r--r--src/third_party/wiredtiger/src/async/async_api.c11
-rw-r--r--src/third_party/wiredtiger/src/async/async_op.c4
-rw-r--r--src/third_party/wiredtiger/src/async/async_worker.c6
-rw-r--r--src/third_party/wiredtiger/src/block/block_open.c4
-rw-r--r--src/third_party/wiredtiger/src/btree/bt_curnext.c12
-rw-r--r--src/third_party/wiredtiger/src/btree/bt_curprev.c12
-rw-r--r--src/third_party/wiredtiger/src/btree/bt_delete.c14
-rw-r--r--src/third_party/wiredtiger/src/btree/bt_page.c19
-rw-r--r--src/third_party/wiredtiger/src/btree/bt_read.c6
-rw-r--r--src/third_party/wiredtiger/src/btree/bt_split.c13
-rw-r--r--src/third_party/wiredtiger/src/btree/bt_vrfy_dsk.c6
-rw-r--r--src/third_party/wiredtiger/src/btree/row_key.c5
-rw-r--r--src/third_party/wiredtiger/src/btree/row_modify.c4
-rw-r--r--src/third_party/wiredtiger/src/config/config_def.c9
-rw-r--r--src/third_party/wiredtiger/src/evict/evict_lru.c87
-rw-r--r--src/third_party/wiredtiger/src/include/async.h34
-rw-r--r--src/third_party/wiredtiger/src/include/btmem.h18
-rw-r--r--src/third_party/wiredtiger/src/include/btree.i140
-rw-r--r--src/third_party/wiredtiger/src/include/cell.i26
-rw-r--r--src/third_party/wiredtiger/src/include/config.h31
-rw-r--r--src/third_party/wiredtiger/src/include/cursor.i4
-rw-r--r--src/third_party/wiredtiger/src/include/extern.h1
-rw-r--r--src/third_party/wiredtiger/src/include/gcc.h104
-rw-r--r--src/third_party/wiredtiger/src/include/hardware.h12
-rw-r--r--src/third_party/wiredtiger/src/include/lint.h101
-rw-r--r--src/third_party/wiredtiger/src/include/lsm.h8
-rw-r--r--src/third_party/wiredtiger/src/include/msvc.h103
-rw-r--r--src/third_party/wiredtiger/src/include/serial.i6
-rw-r--r--src/third_party/wiredtiger/src/include/stat.h5
-rw-r--r--src/third_party/wiredtiger/src/include/txn.i7
-rw-r--r--src/third_party/wiredtiger/src/include/wiredtiger.in136
-rw-r--r--src/third_party/wiredtiger/src/log/log.c33
-rw-r--r--src/third_party/wiredtiger/src/log/log_slot.c16
-rw-r--r--src/third_party/wiredtiger/src/lsm/lsm_cursor_bulk.c1
-rw-r--r--src/third_party/wiredtiger/src/lsm/lsm_manager.c4
-rw-r--r--src/third_party/wiredtiger/src/lsm/lsm_merge.c8
-rw-r--r--src/third_party/wiredtiger/src/lsm/lsm_tree.c31
-rw-r--r--src/third_party/wiredtiger/src/lsm/lsm_work_unit.c10
-rw-r--r--src/third_party/wiredtiger/src/lsm/lsm_worker.c2
-rw-r--r--src/third_party/wiredtiger/src/os_posix/os_mtx_cond.c6
-rw-r--r--src/third_party/wiredtiger/src/os_posix/os_mtx_rw.c10
-rw-r--r--src/third_party/wiredtiger/src/os_posix/os_open.c4
-rw-r--r--src/third_party/wiredtiger/src/os_win/os_mtx_cond.c6
-rw-r--r--src/third_party/wiredtiger/src/os_win/os_open.c4
-rw-r--r--src/third_party/wiredtiger/src/reconcile/rec_write.c8
-rw-r--r--src/third_party/wiredtiger/src/session/session_api.c36
-rw-r--r--src/third_party/wiredtiger/src/session/session_dhandle.c9
-rw-r--r--src/third_party/wiredtiger/src/support/rand.c7
-rw-r--r--src/third_party/wiredtiger/src/support/stat.c2
-rw-r--r--src/third_party/wiredtiger/src/txn/txn.c12
56 files changed, 698 insertions, 500 deletions
diff --git a/src/third_party/wiredtiger/bench/wtperf/wtperf.c b/src/third_party/wiredtiger/bench/wtperf/wtperf.c
index a4f679ae736..9ee7865aa0b 100644
--- a/src/third_party/wiredtiger/bench/wtperf/wtperf.c
+++ b/src/third_party/wiredtiger/bench/wtperf/wtperf.c
@@ -94,7 +94,7 @@ static uint64_t wtperf_value_range(CONFIG *);
static inline uint64_t
get_next_incr(CONFIG *cfg)
{
- return (WT_ATOMIC_ADD8(cfg->insert_key, 1));
+ return (__wt_atomic_add64(&cfg->insert_key, 1));
}
static inline void
@@ -154,7 +154,7 @@ cb_asyncop(WT_ASYNC_CALLBACK *cb, WT_ASYNC_OP *op, int ret, uint32_t flags)
switch (type) {
case WT_AOP_COMPACT:
tables = (uint32_t *)op->app_private;
- WT_ATOMIC_ADD4(*tables, (uint32_t)-1);
+ (void)__wt_atomic_add32(tables, (uint32_t)-1);
break;
case WT_AOP_INSERT:
trk = &thread->insert;
@@ -189,7 +189,7 @@ cb_asyncop(WT_ASYNC_CALLBACK *cb, WT_ASYNC_OP *op, int ret, uint32_t flags)
return (0);
if (ret == 0 || (ret == WT_NOTFOUND && type != WT_AOP_INSERT)) {
if (!cfg->in_warmup)
- (void)WT_ATOMIC_ADD8(trk->ops, 1);
+ (void)__wt_atomic_add64(&trk->ops, 1);
return (0);
}
err:
diff --git a/src/third_party/wiredtiger/dist/api_data.py b/src/third_party/wiredtiger/dist/api_data.py
index 215e3f81803..3b14fae427c 100644
--- a/src/third_party/wiredtiger/dist/api_data.py
+++ b/src/third_party/wiredtiger/dist/api_data.py
@@ -662,6 +662,15 @@ methods = {
type='boolean'),
]),
+'session.log_flush' : Method([
+ Config('sync', 'on', r'''
+ forcibly flush the log and wait for it to synchronize. If
+ using \c off, then wait for the log to be written to the file
+ system. If using \c on, wait for the log to be synchronized
+ to the backing store.''',
+ choices=['off', 'on']),
+]),
+
'session.log_printf' : Method([]),
'session.open_cursor' : Method(cursor_runtime_config + [
diff --git a/src/third_party/wiredtiger/dist/s_define.list b/src/third_party/wiredtiger/dist/s_define.list
index 1bceb6a54fb..65fc60e9c7a 100644
--- a/src/third_party/wiredtiger/dist/s_define.list
+++ b/src/third_party/wiredtiger/dist/s_define.list
@@ -17,20 +17,8 @@ TXN_API_CALL
TXN_API_CALL_NOCONF
TXN_API_END
WIN32_LEAN_AND_MEAN
-WT_ATOMIC_ADD1
-WT_ATOMIC_ADD2
-WT_ATOMIC_CAS1
-WT_ATOMIC_CAS2
-WT_ATOMIC_CAS_VAL1
-WT_ATOMIC_CAS_VAL2
-WT_ATOMIC_CAS_VAL4
-WT_ATOMIC_FETCH_ADD1
-WT_ATOMIC_FETCH_ADD2
-WT_ATOMIC_FETCH_ADD4
-WT_ATOMIC_STORE1
-WT_ATOMIC_STORE2
-WT_ATOMIC_SUB1
-WT_ATOMIC_SUB2
+WT_ATOMIC_CAS
+WT_ATOMIC_FUNC
WT_BARRIER
WT_BLOCK_DESC_SIZE
WT_CACHE_LINE_ALIGNMENT
@@ -67,9 +55,3 @@ WT_WITH_LOCK
__F
__WIREDTIGER_EXT_H_
__WIREDTIGER_H_
-__WT_ATOMIC_ADD
-__WT_ATOMIC_CAS
-__WT_ATOMIC_CAS_VAL
-__WT_ATOMIC_FETCH_ADD
-__WT_ATOMIC_STORE
-__WT_ATOMIC_SUB
diff --git a/src/third_party/wiredtiger/dist/s_string.ok b/src/third_party/wiredtiger/dist/s_string.ok
index 2bdd1d88a54..4dd42826b55 100644
--- a/src/third_party/wiredtiger/dist/s_string.ok
+++ b/src/third_party/wiredtiger/dist/s_string.ok
@@ -464,6 +464,7 @@ bzalloc
bzfree
bzip
calloc
+cas
catfmt
cb
cd
@@ -909,6 +910,7 @@ ps
pse
psp
pthread
+ptr
pushms
putK
putV
diff --git a/src/third_party/wiredtiger/dist/stat_data.py b/src/third_party/wiredtiger/dist/stat_data.py
index d27840d3fbc..37843acb188 100644
--- a/src/third_party/wiredtiger/dist/stat_data.py
+++ b/src/third_party/wiredtiger/dist/stat_data.py
@@ -228,6 +228,7 @@ connection_stats = [
LogStat('log_compress_small', 'log records too small to compress'),
LogStat('log_compress_writes', 'log records compressed'),
LogStat('log_compress_write_fails', 'log records not compressed'),
+ LogStat('log_flush', 'log flush operations'),
LogStat('log_max_filesize', 'maximum log file size', 'no_clear,no_scale'),
LogStat('log_prealloc_files', 'pre-allocated log files prepared'),
LogStat('log_prealloc_max',
diff --git a/src/third_party/wiredtiger/lang/java/java_doc.i b/src/third_party/wiredtiger/lang/java/java_doc.i
index 53785a3bab4..d997306d87f 100644
--- a/src/third_party/wiredtiger/lang/java/java_doc.i
+++ b/src/third_party/wiredtiger/lang/java/java_doc.i
@@ -33,6 +33,7 @@ COPYDOC(__wt_session, WT_SESSION, open_cursor)
COPYDOC(__wt_session, WT_SESSION, create)
COPYDOC(__wt_session, WT_SESSION, compact)
COPYDOC(__wt_session, WT_SESSION, drop)
+COPYDOC(__wt_session, WT_SESSION, log_flush)
COPYDOC(__wt_session, WT_SESSION, log_printf)
COPYDOC(__wt_session, WT_SESSION, rename)
COPYDOC(__wt_session, WT_SESSION, salvage)
diff --git a/src/third_party/wiredtiger/src/async/async_api.c b/src/third_party/wiredtiger/src/async/async_api.c
index 9874d7aab00..cd232af5340 100644
--- a/src/third_party/wiredtiger/src/async/async_api.c
+++ b/src/third_party/wiredtiger/src/async/async_api.c
@@ -151,15 +151,16 @@ retry:
* If we can set the state then the op entry is ours.
* Start the next search at the next entry after this one.
*/
- if (!WT_ATOMIC_CAS4(op->state, WT_ASYNCOP_FREE, WT_ASYNCOP_READY)) {
+ if (!__wt_atomic_cas32(&op->state, WT_ASYNCOP_FREE, WT_ASYNCOP_READY)) {
WT_STAT_FAST_CONN_INCR(session, async_alloc_race);
goto retry;
}
WT_STAT_FAST_CONN_INCRV(session, async_alloc_view, view);
WT_RET(__async_get_format(conn, uri, config, op));
- op->unique_id = WT_ATOMIC_ADD8(async->op_id, 1);
+ op->unique_id = __wt_atomic_add64(&async->op_id, 1);
op->optype = WT_AOP_NONE;
- (void)WT_ATOMIC_STORE4(async->ops_index, (i + 1) % conn->async_size);
+ (void)__wt_atomic_store32(
+ &async->ops_index, (i + 1) % conn->async_size);
*opp = op;
return (0);
}
@@ -514,7 +515,7 @@ retry:
*/
__wt_sleep(0, 100000);
- if (!WT_ATOMIC_CAS4(async->flush_state, WT_ASYNC_FLUSH_NONE,
+ if (!__wt_atomic_cas32(&async->flush_state, WT_ASYNC_FLUSH_NONE,
WT_ASYNC_FLUSH_IN_PROGRESS))
goto retry;
/*
@@ -524,7 +525,7 @@ retry:
* things off the work queue with the lock.
*/
async->flush_count = 0;
- (void)WT_ATOMIC_ADD8(async->flush_gen, 1);
+ (void)__wt_atomic_add64(&async->flush_gen, 1);
WT_ASSERT(session, async->flush_op.state == WT_ASYNCOP_FREE);
async->flush_op.state = WT_ASYNCOP_READY;
WT_ERR(__wt_async_op_enqueue(session, &async->flush_op));
diff --git a/src/third_party/wiredtiger/src/async/async_op.c b/src/third_party/wiredtiger/src/async/async_op.c
index d0c58f584cc..7e1920933c2 100644
--- a/src/third_party/wiredtiger/src/async/async_op.c
+++ b/src/third_party/wiredtiger/src/async/async_op.c
@@ -280,7 +280,7 @@ __wt_async_op_enqueue(WT_SESSION_IMPL *session, WT_ASYNC_OP_IMPL *op)
* Enqueue op at the tail of the work queue.
* We get our slot in the ring buffer to use.
*/
- my_alloc = WT_ATOMIC_ADD8(async->alloc_head, 1);
+ my_alloc = __wt_atomic_add64(&async->alloc_head, 1);
my_slot = my_alloc % async->async_qsize;
/*
@@ -300,7 +300,7 @@ __wt_async_op_enqueue(WT_SESSION_IMPL *session, WT_ASYNC_OP_IMPL *op)
#endif
WT_PUBLISH(async->async_queue[my_slot], op);
op->state = WT_ASYNCOP_ENQUEUED;
- if (WT_ATOMIC_ADD4(async->cur_queue, 1) > async->max_queue)
+ if (__wt_atomic_add32(&async->cur_queue, 1) > async->max_queue)
WT_PUBLISH(async->max_queue, async->cur_queue);
/*
* Multiple threads may be adding ops to the queue. We need to wait
diff --git a/src/third_party/wiredtiger/src/async/async_worker.c b/src/third_party/wiredtiger/src/async/async_worker.c
index 473e7103832..6a5ec5feeb0 100644
--- a/src/third_party/wiredtiger/src/async/async_worker.c
+++ b/src/third_party/wiredtiger/src/async/async_worker.c
@@ -67,7 +67,7 @@ retry:
* a race, try again.
*/
my_consume = last_consume + 1;
- if (!WT_ATOMIC_CAS8(async->alloc_tail, last_consume, my_consume))
+ if (!__wt_atomic_cas64(&async->alloc_tail, last_consume, my_consume))
goto retry;
/*
* This item of work is ours to process. Clear it out of the
@@ -81,7 +81,7 @@ retry:
WT_ASSERT(session, async->cur_queue > 0);
WT_ASSERT(session, *op != NULL);
WT_ASSERT(session, (*op)->state == WT_ASYNCOP_ENQUEUED);
- (void)WT_ATOMIC_SUB4(async->cur_queue, 1);
+ (void)__wt_atomic_sub32(&async->cur_queue, 1);
(*op)->state = WT_ASYNCOP_WORKING;
if (*op == &async->flush_op)
@@ -316,7 +316,7 @@ __wt_async_worker(void *arg)
* the queue.
*/
WT_ORDERED_READ(flush_gen, async->flush_gen);
- if (WT_ATOMIC_ADD4(async->flush_count, 1) ==
+ if (__wt_atomic_add32(&async->flush_count, 1) ==
conn->async_workers) {
/*
* We're last. All workers accounted for so
diff --git a/src/third_party/wiredtiger/src/block/block_open.c b/src/third_party/wiredtiger/src/block/block_open.c
index e1b7894aee5..9223c1ad8e4 100644
--- a/src/third_party/wiredtiger/src/block/block_open.c
+++ b/src/third_party/wiredtiger/src/block/block_open.c
@@ -159,9 +159,9 @@ __wt_block_configure_first_fit(WT_BLOCK *block, int on)
* as long as any operation wants it.
*/
if (on)
- (void)WT_ATOMIC_ADD4(block->allocfirst, 1);
+ (void)__wt_atomic_add32(&block->allocfirst, 1);
else
- (void)WT_ATOMIC_SUB4(block->allocfirst, 1);
+ (void)__wt_atomic_sub32(&block->allocfirst, 1);
}
/*
diff --git a/src/third_party/wiredtiger/src/btree/bt_curnext.c b/src/third_party/wiredtiger/src/btree/bt_curnext.c
index d80a5f4740d..5e866dc9233 100644
--- a/src/third_party/wiredtiger/src/btree/bt_curnext.c
+++ b/src/third_party/wiredtiger/src/btree/bt_curnext.c
@@ -144,7 +144,8 @@ new_page: if (cbt->ins == NULL)
if ((upd = __wt_txn_read(session, cbt->ins->upd)) == NULL)
continue;
if (WT_UPDATE_DELETED_ISSET(upd)) {
- ++cbt->page_deleted_count;
+ if (__wt_txn_visible_all(session, upd->txnid))
+ ++cbt->page_deleted_count;
continue;
}
val->data = WT_UPDATE_DATA(upd);
@@ -200,7 +201,8 @@ new_page: /* Find the matching WT_COL slot. */
NULL : __wt_txn_read(session, cbt->ins->upd);
if (upd != NULL) {
if (WT_UPDATE_DELETED_ISSET(upd)) {
- ++cbt->page_deleted_count;
+ if (__wt_txn_visible_all(session, upd->txnid))
+ ++cbt->page_deleted_count;
continue;
}
@@ -285,7 +287,8 @@ new_insert: if ((ins = cbt->ins) != NULL) {
if ((upd = __wt_txn_read(session, ins->upd)) == NULL)
continue;
if (WT_UPDATE_DELETED_ISSET(upd)) {
- ++cbt->page_deleted_count;
+ if (__wt_txn_visible_all(session, upd->txnid))
+ ++cbt->page_deleted_count;
continue;
}
key->data = WT_INSERT_KEY(ins);
@@ -317,7 +320,8 @@ new_insert: if ((ins = cbt->ins) != NULL) {
rip = &page->pg_row_d[cbt->slot];
upd = __wt_txn_read(session, WT_ROW_UPDATE(page, rip));
if (upd != NULL && WT_UPDATE_DELETED_ISSET(upd)) {
- ++cbt->page_deleted_count;
+ if (__wt_txn_visible_all(session, upd->txnid))
+ ++cbt->page_deleted_count;
continue;
}
diff --git a/src/third_party/wiredtiger/src/btree/bt_curprev.c b/src/third_party/wiredtiger/src/btree/bt_curprev.c
index f1ca81ee145..08388938080 100644
--- a/src/third_party/wiredtiger/src/btree/bt_curprev.c
+++ b/src/third_party/wiredtiger/src/btree/bt_curprev.c
@@ -281,7 +281,8 @@ new_page: if (cbt->ins == NULL)
if ((upd = __wt_txn_read(session, cbt->ins->upd)) == NULL)
continue;
if (WT_UPDATE_DELETED_ISSET(upd)) {
- ++cbt->page_deleted_count;
+ if (__wt_txn_visible_all(session, upd->txnid))
+ ++cbt->page_deleted_count;
continue;
}
val->data = WT_UPDATE_DATA(upd);
@@ -338,7 +339,8 @@ new_page: if (cbt->recno < page->pg_var_recno)
NULL : __wt_txn_read(session, cbt->ins->upd);
if (upd != NULL) {
if (WT_UPDATE_DELETED_ISSET(upd)) {
- ++cbt->page_deleted_count;
+ if (__wt_txn_visible_all(session, upd->txnid))
+ ++cbt->page_deleted_count;
continue;
}
@@ -434,7 +436,8 @@ new_insert: if ((ins = cbt->ins) != NULL) {
if ((upd = __wt_txn_read(session, ins->upd)) == NULL)
continue;
if (WT_UPDATE_DELETED_ISSET(upd)) {
- ++cbt->page_deleted_count;
+ if (__wt_txn_visible_all(session, upd->txnid))
+ ++cbt->page_deleted_count;
continue;
}
key->data = WT_INSERT_KEY(ins);
@@ -468,7 +471,8 @@ new_insert: if ((ins = cbt->ins) != NULL) {
rip = &page->pg_row_d[cbt->slot];
upd = __wt_txn_read(session, WT_ROW_UPDATE(page, rip));
if (upd != NULL && WT_UPDATE_DELETED_ISSET(upd)) {
- ++cbt->page_deleted_count;
+ if (__wt_txn_visible_all(session, upd->txnid))
+ ++cbt->page_deleted_count;
continue;
}
diff --git a/src/third_party/wiredtiger/src/btree/bt_delete.c b/src/third_party/wiredtiger/src/btree/bt_delete.c
index 8cca6328f21..cddfa0ef801 100644
--- a/src/third_party/wiredtiger/src/btree/bt_delete.c
+++ b/src/third_party/wiredtiger/src/btree/bt_delete.c
@@ -70,15 +70,15 @@ __wt_delete_page(WT_SESSION_IMPL *session, WT_REF *ref, int *skipp)
/* If we have a clean page in memory, attempt to evict it. */
if (ref->state == WT_REF_MEM &&
- WT_ATOMIC_CAS4(ref->state, WT_REF_MEM, WT_REF_LOCKED)) {
+ __wt_atomic_casv32(&ref->state, WT_REF_MEM, WT_REF_LOCKED)) {
if (__wt_page_is_modified(ref->page)) {
WT_PUBLISH(ref->state, WT_REF_MEM);
return (0);
}
- (void)WT_ATOMIC_ADD4(S2BT(session)->evict_busy, 1);
+ (void)__wt_atomic_addv32(&S2BT(session)->evict_busy, 1);
ret = __wt_evict_page(session, ref);
- (void)WT_ATOMIC_SUB4(S2BT(session)->evict_busy, 1);
+ (void)__wt_atomic_subv32(&S2BT(session)->evict_busy, 1);
WT_RET_BUSY_OK(ret);
}
@@ -93,7 +93,7 @@ __wt_delete_page(WT_SESSION_IMPL *session, WT_REF *ref, int *skipp)
* unclear optimizing for overlapping range deletes is worth the effort.
*/
if (ref->state != WT_REF_DISK ||
- !WT_ATOMIC_CAS4(ref->state, WT_REF_DISK, WT_REF_LOCKED))
+ !__wt_atomic_casv32(&ref->state, WT_REF_DISK, WT_REF_LOCKED))
return (0);
/*
@@ -176,8 +176,8 @@ __wt_delete_page_rollback(WT_SESSION_IMPL *session, WT_REF *ref)
* If the page is still "deleted", it's as we left it,
* reset the state.
*/
- if (WT_ATOMIC_CAS4(
- ref->state, WT_REF_DELETED, WT_REF_DISK))
+ if (__wt_atomic_casv32(
+ &ref->state, WT_REF_DELETED, WT_REF_DISK))
return;
break;
case WT_REF_LOCKED:
@@ -242,7 +242,7 @@ __wt_delete_page_skip(WT_SESSION_IMPL *session, WT_REF *ref)
if (ref->page_del == NULL)
return (1);
- if (!WT_ATOMIC_CAS4(ref->state, WT_REF_DELETED, WT_REF_LOCKED))
+ if (!__wt_atomic_casv32(&ref->state, WT_REF_DELETED, WT_REF_LOCKED))
return (0);
skip = (ref->page_del == NULL ||
diff --git a/src/third_party/wiredtiger/src/btree/bt_page.c b/src/third_party/wiredtiger/src/btree/bt_page.c
index b8b67720fce..3ae3ee23e58 100644
--- a/src/third_party/wiredtiger/src/btree/bt_page.c
+++ b/src/third_party/wiredtiger/src/btree/bt_page.c
@@ -81,10 +81,12 @@ __wt_page_in_func(WT_SESSION_IMPL *session, WT_REF *ref, uint32_t flags
return (WT_NOTFOUND);
/*
- * The page isn't in memory, attempt to read it.
- * Make sure there is space in the cache.
+ * The page isn't in memory, read it. If this thread is
+ * allowed to do eviction work, check for space in the
+ * cache.
*/
- WT_RET(__wt_cache_full_check(session));
+ if (!LF_ISSET(WT_READ_NO_EVICT))
+ WT_RET(__wt_cache_full_check(session));
WT_RET(__wt_cache_read(session, ref));
oldgen = LF_ISSET(WT_READ_WONT_NEED) ||
F_ISSET(session, WT_SESSION_NO_CACHE);
@@ -185,6 +187,13 @@ __wt_page_in_func(WT_SESSION_IMPL *session, WT_REF *ref, uint32_t flags
if (++wait_cnt < 1000)
__wt_yield();
else {
+ /*
+ * If stalling and this thread is allowed to do eviction
+ * work, check if the cache needs help.
+ */
+ if (!LF_ISSET(WT_READ_NO_EVICT))
+ WT_RET(__wt_cache_full_check(session));
+
sleep_cnt = WT_MIN(wait_cnt, 10000);
wait_cnt *= 2;
WT_STAT_FAST_CONN_INCRV(session, page_sleep, sleep_cnt);
@@ -295,8 +304,8 @@ err: if ((pindex = WT_INTL_INDEX_GET_SAFE(page)) != NULL) {
/* Increment the cache statistics. */
__wt_cache_page_inmem_incr(session, page, size);
- (void)WT_ATOMIC_ADD8(cache->bytes_read, size);
- (void)WT_ATOMIC_ADD8(cache->pages_inmem, 1);
+ (void)__wt_atomic_add64(&cache->bytes_read, size);
+ (void)__wt_atomic_add64(&cache->pages_inmem, 1);
*pagep = page;
return (0);
diff --git a/src/third_party/wiredtiger/src/btree/bt_read.c b/src/third_party/wiredtiger/src/btree/bt_read.c
index e27f7c3398c..a3ce39b7758 100644
--- a/src/third_party/wiredtiger/src/btree/bt_read.c
+++ b/src/third_party/wiredtiger/src/btree/bt_read.c
@@ -18,8 +18,8 @@ __wt_cache_read(WT_SESSION_IMPL *session, WT_REF *ref)
WT_DECL_RET;
WT_ITEM tmp;
WT_PAGE *page;
- WT_PAGE_STATE previous_state;
size_t addr_size;
+ uint32_t previous_state;
const uint8_t *addr;
page = NULL;
@@ -35,9 +35,9 @@ __wt_cache_read(WT_SESSION_IMPL *session, WT_REF *ref)
* WT_REF_LOCKED, for deleted pages. If successful, we've won the
* race, read the page.
*/
- if (WT_ATOMIC_CAS4(ref->state, WT_REF_DISK, WT_REF_READING))
+ if (__wt_atomic_casv32(&ref->state, WT_REF_DISK, WT_REF_READING))
previous_state = WT_REF_DISK;
- else if (WT_ATOMIC_CAS4(ref->state, WT_REF_DELETED, WT_REF_LOCKED))
+ else if (__wt_atomic_casv32(&ref->state, WT_REF_DELETED, WT_REF_LOCKED))
previous_state = WT_REF_DELETED;
else
return (0);
diff --git a/src/third_party/wiredtiger/src/btree/bt_split.c b/src/third_party/wiredtiger/src/btree/bt_split.c
index acef71f1d94..98102b76398 100644
--- a/src/third_party/wiredtiger/src/btree/bt_split.c
+++ b/src/third_party/wiredtiger/src/btree/bt_split.c
@@ -520,7 +520,7 @@ __split_deepen(WT_SESSION_IMPL *session, WT_PAGE *parent, uint32_t children)
*/
WT_ASSERT(session, WT_INTL_INDEX_GET_SAFE(parent) == pindex);
WT_INTL_INDEX_SET(parent, alloc_index);
- split_gen = WT_ATOMIC_ADD8(S2C(session)->split_gen, 1);
+ split_gen = __wt_atomic_addv64(&S2C(session)->split_gen, 1);
panic = 1;
#ifdef HAVE_DIAGNOSTIC
@@ -841,6 +841,11 @@ __split_parent(WT_SESSION_IMPL *session, WT_REF *ref,
* update the parent's index, it will no longer refer to the child, and
* could conceivably be evicted. Get a hazard pointer on the parent
* now, so that we can safely access it after updating the index.
+ *
+ * Take care getting the page doesn't trigger eviction work: we could
+ * block trying to split a different child of our parent and deadlock
+ * or we could be the eviction server relied upon by other threads to
+ * populate the eviction queue.
*/
if (!__wt_ref_is_root(parent_ref = parent->pg_intl_parent_ref)) {
WT_ERR(__wt_page_in(session, parent_ref, WT_READ_NO_EVICT));
@@ -862,8 +867,8 @@ __split_parent(WT_SESSION_IMPL *session, WT_REF *ref,
WT_ASSERT(session, next_ref->state != WT_REF_SPLIT);
if (next_ref->state == WT_REF_DELETED &&
__wt_delete_page_skip(session, next_ref) &&
- WT_ATOMIC_CAS4(next_ref->state,
- WT_REF_DELETED, WT_REF_SPLIT))
+ __wt_atomic_casv32(
+ &next_ref->state, WT_REF_DELETED, WT_REF_SPLIT))
deleted_entries++;
}
@@ -908,7 +913,7 @@ __split_parent(WT_SESSION_IMPL *session, WT_REF *ref,
*/
WT_ASSERT(session, WT_INTL_INDEX_GET_SAFE(parent) == pindex);
WT_INTL_INDEX_SET(parent, alloc_index);
- split_gen = WT_ATOMIC_ADD8(S2C(session)->split_gen, 1);
+ split_gen = __wt_atomic_addv64(&S2C(session)->split_gen, 1);
alloc_index = NULL;
#ifdef HAVE_DIAGNOSTIC
diff --git a/src/third_party/wiredtiger/src/btree/bt_vrfy_dsk.c b/src/third_party/wiredtiger/src/btree/bt_vrfy_dsk.c
index d6c300d200a..dbaf18c69f2 100644
--- a/src/third_party/wiredtiger/src/btree/bt_vrfy_dsk.c
+++ b/src/third_party/wiredtiger/src/btree/bt_vrfy_dsk.c
@@ -218,7 +218,7 @@ __verify_dsk_row(
++cell_num;
/* Carefully unpack the cell. */
- if (__wt_cell_unpack_safe(cell, unpack, end) != 0) {
+ if (__wt_cell_unpack_safe(cell, unpack, dsk, end) != 0) {
ret = __err_cell_corrupted(session, cell_num, addr);
goto err;
}
@@ -485,7 +485,7 @@ __verify_dsk_col_int(
++cell_num;
/* Carefully unpack the cell. */
- if (__wt_cell_unpack_safe(cell, unpack, end) != 0)
+ if (__wt_cell_unpack_safe(cell, unpack, dsk, end) != 0)
return (__err_cell_corrupted(session, cell_num, addr));
/* Check the raw and collapsed cell types. */
@@ -552,7 +552,7 @@ __verify_dsk_col_var(
++cell_num;
/* Carefully unpack the cell. */
- if (__wt_cell_unpack_safe(cell, unpack, end) != 0)
+ if (__wt_cell_unpack_safe(cell, unpack, dsk, end) != 0)
return (__err_cell_corrupted(session, cell_num, addr));
/* Check the raw and collapsed cell types. */
diff --git a/src/third_party/wiredtiger/src/btree/row_key.c b/src/third_party/wiredtiger/src/btree/row_key.c
index f2868afe13a..4affa7fa62a 100644
--- a/src/third_party/wiredtiger/src/btree/row_key.c
+++ b/src/third_party/wiredtiger/src/btree/row_key.c
@@ -448,7 +448,8 @@ next: switch (direction) {
* update the page's memory footprint, on failure, free
* the allocated memory.
*/
- if (WT_ATOMIC_CAS8(WT_ROW_KEY_COPY(rip), copy, ikey))
+ if (__wt_atomic_cas_ptr(
+ (void *)&WT_ROW_KEY_COPY(rip), copy, ikey))
__wt_cache_page_inmem_incr(session,
page, sizeof(WT_IKEY) + ikey->size);
else
@@ -525,7 +526,7 @@ __wt_row_ikey(WT_SESSION_IMPL *session,
WT_ASSERT(session, oldv == 0 || (oldv & WT_IK_FLAG) != 0);
WT_ASSERT(session, ref->state != WT_REF_SPLIT);
WT_ASSERT(session,
- WT_ATOMIC_CAS8(ref->key.ikey, (WT_IKEY *)oldv, ikey));
+ __wt_atomic_cas_ptr(&ref->key.ikey, (WT_IKEY *)oldv, ikey));
}
#else
ref->key.ikey = ikey;
diff --git a/src/third_party/wiredtiger/src/btree/row_modify.c b/src/third_party/wiredtiger/src/btree/row_modify.c
index 2dd42de5900..6a9ad1c78b9 100644
--- a/src/third_party/wiredtiger/src/btree/row_modify.c
+++ b/src/third_party/wiredtiger/src/btree/row_modify.c
@@ -34,7 +34,7 @@ __wt_page_modify_alloc(WT_SESSION_IMPL *session, WT_PAGE *page)
* footprint, else discard the modify structure, another thread did the
* work.
*/
- if (WT_ATOMIC_CAS8(page->modify, NULL, modify))
+ if (__wt_atomic_cas_ptr(&page->modify, NULL, modify))
__wt_cache_page_inmem_incr(session, page, sizeof(*modify));
else
__wt_free(session, modify);
@@ -318,7 +318,7 @@ __wt_update_obsolete_check(
*/
if (first != NULL &&
(next = first->next) != NULL &&
- WT_ATOMIC_CAS8(first->next, next, NULL))
+ __wt_atomic_cas_ptr(&first->next, next, NULL))
return (next);
/*
diff --git a/src/third_party/wiredtiger/src/config/config_def.c b/src/third_party/wiredtiger/src/config/config_def.c
index 2dfcf862cc8..75252afae34 100644
--- a/src/third_party/wiredtiger/src/config/config_def.c
+++ b/src/third_party/wiredtiger/src/config/config_def.c
@@ -299,6 +299,11 @@ static const WT_CONFIG_CHECK confchk_session_drop[] = {
{ NULL, NULL, NULL, NULL, NULL }
};
+static const WT_CONFIG_CHECK confchk_session_log_flush[] = {
+ { "sync", "string", NULL, "choices=[\"off\",\"on\"]", NULL },
+ { NULL, NULL, NULL, NULL, NULL }
+};
+
static const WT_CONFIG_CHECK confchk_session_open_cursor[] = {
{ "append", "boolean", NULL, NULL, NULL },
{ "bulk", "string", NULL, NULL, NULL },
@@ -742,6 +747,10 @@ static const WT_CONFIG_ENTRY config_entries[] = {
"force=0,remove_files=",
confchk_session_drop
},
+ { "session.log_flush",
+ "sync=on",
+ confchk_session_log_flush
+ },
{ "session.log_printf",
"",
NULL
diff --git a/src/third_party/wiredtiger/src/evict/evict_lru.c b/src/third_party/wiredtiger/src/evict/evict_lru.c
index 4cb29d4e1b9..f077f168a26 100644
--- a/src/third_party/wiredtiger/src/evict/evict_lru.c
+++ b/src/third_party/wiredtiger/src/evict/evict_lru.c
@@ -158,6 +158,8 @@ __evict_server(void *arg)
WT_CONNECTION_IMPL *conn;
WT_DECL_RET;
WT_SESSION_IMPL *session;
+ u_int spins;
+ WT_DECL_SPINLOCK_ID(id);
session = arg;
conn = S2C(session);
@@ -175,7 +177,27 @@ __evict_server(void *arg)
* otherwise we can block applications evicting large pages.
*/
if (!F_ISSET(cache, WT_CACHE_STUCK)) {
- WT_ERR(__evict_clear_walks(session));
+ for (spins = 0; (ret = __wt_spin_trylock(
+ session, &conn->dhandle_lock, &id)) == EBUSY &&
+ !F_ISSET(cache, WT_CACHE_CLEAR_WALKS);
+ spins++) {
+ if (spins < 1000)
+ __wt_yield();
+ else
+ __wt_sleep(0, 1000);
+ }
+ /*
+ * If we gave up acquiring the lock, that indicates a
+ * session is waiting for us to clear walks. Do that
+ * as part of a normal pass (without the handle list
+ * lock) to avoid deadlock.
+ */
+ if (ret == EBUSY)
+ continue;
+ WT_ERR(ret);
+ ret = __evict_clear_all_walks(session);
+ __wt_spin_unlock(session, &conn->dhandle_lock);
+ WT_ERR(ret);
/* Next time we wake up, reverse the sweep direction. */
cache->flags ^= WT_CACHE_WALK_REVERSE;
@@ -476,6 +498,18 @@ __evict_pass(WT_SESSION_IMPL *session)
*/
__wt_cache_read_gen_incr(session);
+ /*
+ * Update the oldest ID: we use it to decide whether pages are
+ * candidates for eviction. Without this, if all threads are
+ * blocked after a long-running transaction (such as a
+ * checkpoint) completes, we may never start evicting again.
+ *
+ * Do this every time the eviction server wakes up, regardless
+ * of whether the cache is full, to prevent the oldest ID
+ * falling too far behind.
+ */
+ __wt_txn_update_oldest(session, 1);
+
WT_RET(__evict_has_work(session, &flags));
if (flags == 0)
break;
@@ -921,14 +955,6 @@ __evict_walk(WT_SESSION_IMPL *session, uint32_t flags)
incr = dhandle_locked = 0;
retries = 0;
- /*
- * Update the oldest ID: we use it to decide whether pages are
- * candidates for eviction. Without this, if all threads are blocked
- * after a long-running transaction (such as a checkpoint) completes,
- * we may never start evicting again.
- */
- __wt_txn_update_oldest(session, 1);
-
if (cache->evict_current == NULL)
WT_STAT_FAST_CONN_INCR(session, cache_eviction_queue_empty);
else
@@ -981,7 +1007,8 @@ retry: while (slot < max_entries && ret == 0) {
} else {
if (incr) {
WT_ASSERT(session, dhandle->session_inuse > 0);
- (void)WT_ATOMIC_SUB4(dhandle->session_inuse, 1);
+ (void)__wt_atomic_subi32(
+ &dhandle->session_inuse, 1);
incr = 0;
}
dhandle = TAILQ_NEXT(dhandle, q);
@@ -1025,7 +1052,7 @@ retry: while (slot < max_entries && ret == 0) {
btree->evict_walk_skips = 0;
prev_slot = slot;
- (void)WT_ATOMIC_ADD4(dhandle->session_inuse, 1);
+ (void)__wt_atomic_addi32(&dhandle->session_inuse, 1);
incr = 1;
__wt_spin_unlock(session, &conn->dhandle_lock);
dhandle_locked = 0;
@@ -1060,7 +1087,7 @@ retry: while (slot < max_entries && ret == 0) {
cache->evict_file_next = dhandle;
WT_ASSERT(session, dhandle->session_inuse > 0);
- (void)WT_ATOMIC_SUB4(dhandle->session_inuse, 1);
+ (void)__wt_atomic_subi32(&dhandle->session_inuse, 1);
incr = 0;
}
@@ -1259,14 +1286,15 @@ fast: /* If the page can't be evicted, give up. */
* If we happen to end up on the root page, clear it. We have to track
* hazard pointers, and the root page complicates that calculation.
*
- * Also clear the walk if we land on a page requiring forced eviction.
- * The eviction server may go to sleep, and we want this page evicted
- * as quickly as possible.
+ * If we land on a page requiring forced eviction, move on to the next
+ * page: we want this page evicted as quickly as possible.
*/
- if ((ref = btree->evict_ref) != NULL && (__wt_ref_is_root(ref) ||
- ref->page->read_gen == WT_READGEN_OLDEST)) {
- btree->evict_ref = NULL;
- __wt_page_release(session, ref, WT_READ_NO_EVICT);
+ if ((ref = btree->evict_ref) != NULL) {
+ if (__wt_ref_is_root(ref))
+ WT_RET(__evict_clear_walk(session));
+ else if (ref->page->read_gen == WT_READGEN_OLDEST)
+ WT_RET_NOTFOUND_OK(__wt_tree_walk(session,
+ &btree->evict_ref, &pages_walked, walk_flags));
}
/* If the walk was interrupted by a locked page, that's okay. */
@@ -1331,8 +1359,8 @@ __evict_get_ref(
* multiple attempts to evict it. For pages that are already
* being evicted, this operation will fail and we will move on.
*/
- if (!WT_ATOMIC_CAS4(
- evict->ref->state, WT_REF_MEM, WT_REF_LOCKED)) {
+ if (!__wt_atomic_casv32(
+ &evict->ref->state, WT_REF_MEM, WT_REF_LOCKED)) {
__evict_list_clear(session, evict);
continue;
}
@@ -1341,7 +1369,7 @@ __evict_get_ref(
* Increment the busy count in the btree handle to prevent it
* from being closed under us.
*/
- (void)WT_ATOMIC_ADD4(evict->btree->evict_busy, 1);
+ (void)__wt_atomic_addv32(&evict->btree->evict_busy, 1);
*btreep = evict->btree;
*refp = evict->ref;
@@ -1420,7 +1448,7 @@ __wt_evict_lru_page(WT_SESSION_IMPL *session, int is_server)
WT_WITH_BTREE(session, btree, ret = __wt_evict_page(session, ref));
- (void)WT_ATOMIC_SUB4(btree->evict_busy, 1);
+ (void)__wt_atomic_subv32(&btree->evict_busy, 1);
WT_RET(ret);
@@ -1496,19 +1524,6 @@ __wt_cache_wait(WT_SESSION_IMPL *session, int full)
else if (ret == 0)
continue;
- /*
- * The cache is still full and no pages were found in the queue
- * to evict. If this transaction is the one holding back the
- * oldest ID, we can't wait forever. We'll block next time we
- * are not busy.
- */
- if (busy) {
- __wt_txn_update_oldest(session, 0);
- if (txn_state->id == txn_global->oldest_id ||
- txn_state->snap_min == txn_global->oldest_id)
- return (0);
- }
-
/* Wait for the queue to re-populate before trying again. */
WT_RET(__wt_cond_wait(session,
S2C(session)->cache->evict_waiter_cond, 100000));
diff --git a/src/third_party/wiredtiger/src/include/async.h b/src/third_party/wiredtiger/src/include/async.h
index c8d9fa5aa91..fb9a64e774d 100644
--- a/src/third_party/wiredtiger/src/include/async.h
+++ b/src/third_party/wiredtiger/src/include/async.h
@@ -6,20 +6,6 @@
* See the file LICENSE for redistribution information.
*/
-typedef enum {
- WT_ASYNCOP_ENQUEUED, /* Placed on the work queue */
- WT_ASYNCOP_FREE, /* Able to be allocated to user */
- WT_ASYNCOP_READY, /* Allocated and ready for user to use */
- WT_ASYNCOP_WORKING /* Operation in progress by worker */
-} WT_ASYNC_STATE;
-
-typedef enum {
- WT_ASYNC_FLUSH_NONE=0, /* No flush in progress */
- WT_ASYNC_FLUSH_COMPLETE, /* Notify flush caller it's done */
- WT_ASYNC_FLUSH_IN_PROGRESS, /* Prevent other callers */
- WT_ASYNC_FLUSHING /* Notify workers */
-} WT_ASYNC_FLUSH_STATE;
-
#define MAX_ASYNC_SLEEP_USECS 100000 /* Maximum sleep waiting for work */
#define MAX_ASYNC_YIELD 200 /* Maximum number of yields for work */
@@ -53,7 +39,13 @@ struct __wt_async_op_impl {
uint64_t unique_id; /* Unique identifier. */
WT_ASYNC_FORMAT *format; /* Format structure */
- WT_ASYNC_STATE state; /* Op state */
+
+#define WT_ASYNCOP_ENQUEUED 0 /* Placed on the work queue */
+#define WT_ASYNCOP_FREE 1 /* Able to be allocated to user */
+#define WT_ASYNCOP_READY 2 /* Allocated, ready for user to use */
+#define WT_ASYNCOP_WORKING 3 /* Operation in progress by worker */
+ uint32_t state;
+
WT_ASYNC_OPTYPE optype; /* Operation type */
};
@@ -89,9 +81,15 @@ struct __wt_async {
uint64_t tail_slot; /* Worker slot consumed */
TAILQ_HEAD(__wt_async_format_qh, __wt_async_format) formatqh;
- int cur_queue; /* Currently enqueued */
- int max_queue; /* Maximum enqueued */
- WT_ASYNC_FLUSH_STATE flush_state; /* Queue flush state */
+ uint32_t cur_queue; /* Currently enqueued */
+ uint32_t max_queue; /* Maximum enqueued */
+
+#define WT_ASYNC_FLUSH_NONE 0 /* No flush in progress */
+#define WT_ASYNC_FLUSH_COMPLETE 1 /* Notify flush caller done */
+#define WT_ASYNC_FLUSH_IN_PROGRESS 2 /* Prevent other callers */
+#define WT_ASYNC_FLUSHING 3 /* Notify workers */
+ uint32_t flush_state;
+
/* Notify any waiting threads when flushing is done. */
WT_CONDVAR *flush_cond;
WT_ASYNC_OP_IMPL flush_op; /* Special flush op */
diff --git a/src/third_party/wiredtiger/src/include/btmem.h b/src/third_party/wiredtiger/src/include/btmem.h
index 1c04af1aef3..2933add6201 100644
--- a/src/third_party/wiredtiger/src/include/btmem.h
+++ b/src/third_party/wiredtiger/src/include/btmem.h
@@ -648,14 +648,6 @@ struct __wt_page {
* to the readers. If the evicting thread does not find a hazard pointer,
* the page is evicted.
*/
-typedef enum __wt_page_state {
- WT_REF_DISK=0, /* Page is on disk */
- WT_REF_DELETED, /* Page is on disk, but deleted */
- WT_REF_LOCKED, /* Page locked for exclusive access */
- WT_REF_MEM, /* Page is in cache and valid */
- WT_REF_READING, /* Page being read */
- WT_REF_SPLIT /* Page was split */
-} WT_PAGE_STATE;
/*
* WT_PAGE_DELETED --
@@ -683,7 +675,13 @@ struct __wt_ref {
WT_PAGE * volatile home; /* Reference page */
uint32_t pindex_hint; /* Reference page index hint */
- volatile WT_PAGE_STATE state; /* Page state */
+#define WT_REF_DISK 0 /* Page is on disk */
+#define WT_REF_DELETED 1 /* Page is on disk, but deleted */
+#define WT_REF_LOCKED 2 /* Page locked for exclusive access */
+#define WT_REF_MEM 3 /* Page is in cache and valid */
+#define WT_REF_READING 4 /* Page being read */
+#define WT_REF_SPLIT 5 /* Parent page split (WT_REF dead) */
+ volatile uint32_t state; /* Page state */
/*
* Address: on-page cell if read from backing block, off-page WT_ADDR
@@ -950,7 +948,7 @@ struct __wt_insert {
#define WT_PAGE_ALLOC_AND_SWAP(s, page, dest, v, count) do { \
if (((v) = (dest)) == NULL) { \
WT_ERR(__wt_calloc_def(s, count, &(v))); \
- if (WT_ATOMIC_CAS8(dest, NULL, v)) \
+ if (__wt_atomic_cas_ptr(&dest, NULL, v)) \
__wt_cache_page_inmem_incr( \
s, page, (count) * sizeof(*(v))); \
else \
diff --git a/src/third_party/wiredtiger/src/include/btree.i b/src/third_party/wiredtiger/src/include/btree.i
index 23cb54a4179..5db6ff1f002 100644
--- a/src/third_party/wiredtiger/src/include/btree.i
+++ b/src/third_party/wiredtiger/src/include/btree.i
@@ -49,46 +49,74 @@ __wt_cache_page_inmem_incr(WT_SESSION_IMPL *session, WT_PAGE *page, size_t size)
WT_ASSERT(session, size < WT_EXABYTE);
cache = S2C(session)->cache;
- (void)WT_ATOMIC_ADD8(cache->bytes_inmem, size);
- (void)WT_ATOMIC_ADD8(page->memory_footprint, size);
+ (void)__wt_atomic_add64(&cache->bytes_inmem, size);
+ (void)__wt_atomic_addsize(&page->memory_footprint, size);
if (__wt_page_is_modified(page)) {
- (void)WT_ATOMIC_ADD8(cache->bytes_dirty, size);
- (void)WT_ATOMIC_ADD8(page->modify->bytes_dirty, size);
+ (void)__wt_atomic_add64(&cache->bytes_dirty, size);
+ (void)__wt_atomic_addsize(&page->modify->bytes_dirty, size);
}
/* Track internal and overflow size in cache. */
if (WT_PAGE_IS_INTERNAL(page))
- (void)WT_ATOMIC_ADD8(cache->bytes_internal, size);
+ (void)__wt_atomic_add64(&cache->bytes_internal, size);
else if (page->type == WT_PAGE_OVFL)
- (void)WT_ATOMIC_ADD8(cache->bytes_overflow, size);
+ (void)__wt_atomic_add64(&cache->bytes_overflow, size);
}
-/*
- * WT_CACHE_DECR --
- * Macro to decrement a field by a size.
- *
- * Be defensive and don't underflow: a band-aid on a gaping wound, but underflow
- * won't make things better no matter the problem (specifically, underflow makes
- * eviction crazy trying to evict non-existent memory).
+/*
+ * __wt_cache_decr_check_size --
+ * Decrement a size_t cache value and check for underflow.
+ */
+static inline void
+__wt_cache_decr_check_size(
+ WT_SESSION_IMPL *session, size_t *vp, size_t v, const char *fld)
+{
+ if (__wt_atomic_subsize(vp, v) < WT_EXABYTE)
+ return;
+
+#ifdef HAVE_DIAGNOSTIC
+ (void)__wt_atomic_addsize(vp, v);
+
+ {
+ static int first = 1;
+
+ if (!first)
+ return;
+ __wt_errx(session, "%s underflow: decrementing %" WT_SIZET_FMT, fld, v);
+ first = 0;
+ }
+#else
+ WT_UNUSED(fld);
+ WT_UNUSED(session);
+#endif
+}
+
+/*
+ * __wt_cache_decr_check_uint64 --
+ * Decrement a uint64_t cache value and check for underflow.
*/
+static inline void
+__wt_cache_decr_check_uint64(
+ WT_SESSION_IMPL *session, uint64_t *vp, size_t v, const char *fld)
+{
+ if (__wt_atomic_sub64(vp, v) < WT_EXABYTE)
+ return;
+
#ifdef HAVE_DIAGNOSTIC
-#define WT_CACHE_DECR(session, f, sz) do { \
- static int __first = 1; \
- if (WT_ATOMIC_SUB8(f, sz) > WT_EXABYTE) { \
- (void)WT_ATOMIC_ADD8(f, sz); \
- if (__first) { \
- __wt_errx(session, \
- "%s underflow: decrementing %" WT_SIZET_FMT,\
- #f, sz); \
- __first = 0; \
- } \
- } \
-} while (0)
+ (void)__wt_atomic_add64(vp, v);
+
+ {
+ static int first = 1;
+
+ if (!first)
+ return;
+ __wt_errx(session, "%s underflow: decrementing %" WT_SIZET_FMT, fld, v);
+ first = 0;
+ }
#else
-#define WT_CACHE_DECR(s, f, sz) do { \
- if (WT_ATOMIC_SUB8(f, sz) > WT_EXABYTE) \
- (void)WT_ATOMIC_ADD8(f, sz); \
-} while (0)
+ WT_UNUSED(fld);
+ WT_UNUSED(session);
#endif
+}
/*
* __wt_cache_page_byte_dirty_decr --
@@ -128,9 +156,10 @@ __wt_cache_page_byte_dirty_decr(
*/
orig = page->modify->bytes_dirty;
decr = WT_MIN(size, orig);
- if (WT_ATOMIC_CAS8(
- page->modify->bytes_dirty, orig, orig - decr)) {
- WT_CACHE_DECR(session, cache->bytes_dirty, decr);
+ if (__wt_atomic_cassize(
+ &page->modify->bytes_dirty, orig, orig - decr)) {
+ __wt_cache_decr_check_uint64(session,
+ &cache->bytes_dirty, decr, "WT_CACHE.bytes_dirty");
break;
}
}
@@ -149,15 +178,19 @@ __wt_cache_page_inmem_decr(WT_SESSION_IMPL *session, WT_PAGE *page, size_t size)
WT_ASSERT(session, size < WT_EXABYTE);
- WT_CACHE_DECR(session, cache->bytes_inmem, size);
- WT_CACHE_DECR(session, page->memory_footprint, size);
+ __wt_cache_decr_check_uint64(
+ session, &cache->bytes_inmem, size, "WT_CACHE.bytes_inmem");
+ __wt_cache_decr_check_size(
+ session, &page->memory_footprint, size, "WT_PAGE.memory_footprint");
if (__wt_page_is_modified(page))
__wt_cache_page_byte_dirty_decr(session, page, size);
/* Track internal and overflow size in cache. */
if (WT_PAGE_IS_INTERNAL(page))
- WT_CACHE_DECR(session, cache->bytes_internal, size);
+ __wt_cache_decr_check_uint64(session,
+ &cache->bytes_internal, size, "WT_CACHE.bytes_internal");
else if (page->type == WT_PAGE_OVFL)
- WT_CACHE_DECR(session, cache->bytes_overflow, size);
+ __wt_cache_decr_check_uint64(session,
+ &cache->bytes_overflow, size, "WT_CACHE.bytes_overflow");
}
/*
@@ -172,15 +205,15 @@ __wt_cache_dirty_incr(WT_SESSION_IMPL *session, WT_PAGE *page)
size_t size;
cache = S2C(session)->cache;
- (void)WT_ATOMIC_ADD8(cache->pages_dirty, 1);
+ (void)__wt_atomic_add64(&cache->pages_dirty, 1);
/*
* Take care to read the memory_footprint once in case we are racing
* with updates.
*/
size = page->memory_footprint;
- (void)WT_ATOMIC_ADD8(cache->bytes_dirty, size);
- (void)WT_ATOMIC_ADD8(page->modify->bytes_dirty, size);
+ (void)__wt_atomic_add64(&cache->bytes_dirty, size);
+ (void)__wt_atomic_addsize(&page->modify->bytes_dirty, size);
}
/*
@@ -202,7 +235,7 @@ __wt_cache_dirty_decr(WT_SESSION_IMPL *session, WT_PAGE *page)
"count went negative");
cache->pages_dirty = 0;
} else
- (void)WT_ATOMIC_SUB8(cache->pages_dirty, 1);
+ (void)__wt_atomic_sub64(&cache->pages_dirty, 1);
modify = page->modify;
if (modify != NULL && modify->bytes_dirty != 0)
@@ -224,7 +257,15 @@ __wt_cache_page_evict(WT_SESSION_IMPL *session, WT_PAGE *page)
modify = page->modify;
/* Update the bytes in-memory to reflect the eviction. */
- WT_CACHE_DECR(session, cache->bytes_inmem, page->memory_footprint);
+ __wt_cache_decr_check_uint64(session,
+ &cache->bytes_inmem,
+ page->memory_footprint, "WT_CACHE.bytes_inmem");
+
+ /* Update the bytes_internal value to reflect the eviction */
+ if (WT_PAGE_IS_INTERNAL(page))
+ __wt_cache_decr_check_uint64(session,
+ &cache->bytes_internal,
+ page->memory_footprint, "WT_CACHE.bytes_internal");
/* Update the cache's dirty-byte count. */
if (modify != NULL && modify->bytes_dirty != 0) {
@@ -234,13 +275,14 @@ __wt_cache_page_evict(WT_SESSION_IMPL *session, WT_PAGE *page)
"dirty byte count went negative");
cache->bytes_dirty = 0;
} else
- WT_CACHE_DECR(
- session, cache->bytes_dirty, modify->bytes_dirty);
+ __wt_cache_decr_check_uint64(session,
+ &cache->bytes_dirty,
+ modify->bytes_dirty, "WT_CACHE.bytes_dirty");
}
/* Update pages and bytes evicted. */
- (void)WT_ATOMIC_ADD8(cache->bytes_evict, page->memory_footprint);
- (void)WT_ATOMIC_ADD8(cache->pages_evict, 1);
+ (void)__wt_atomic_add64(&cache->bytes_evict, page->memory_footprint);
+ (void)__wt_atomic_add64(&cache->pages_evict, 1);
}
/*
@@ -301,7 +343,7 @@ __wt_page_only_modify_set(WT_SESSION_IMPL *session, WT_PAGE *page)
* Every time the page transitions from clean to dirty, update the cache
* and transactional information.
*/
- if (WT_ATOMIC_ADD4(page->modify->write_gen, 1) == 1) {
+ if (__wt_atomic_add32(&page->modify->write_gen, 1) == 1) {
__wt_cache_dirty_incr(session, page);
/*
@@ -993,14 +1035,14 @@ __wt_page_release_evict(WT_SESSION_IMPL *session, WT_REF *ref)
* reference without first locking the page, it could be evicted in
* between.
*/
- locked = WT_ATOMIC_CAS4(ref->state, WT_REF_MEM, WT_REF_LOCKED);
+ locked = __wt_atomic_casv32(&ref->state, WT_REF_MEM, WT_REF_LOCKED);
if ((ret = __wt_hazard_clear(session, page)) != 0 || !locked) {
if (locked)
ref->state = WT_REF_MEM;
return (ret == 0 ? EBUSY : ret);
}
- (void)WT_ATOMIC_ADD4(btree->evict_busy, 1);
+ (void)__wt_atomic_addv32(&btree->evict_busy, 1);
if ((ret = __wt_evict_page(session, ref)) == 0) {
if (too_big)
WT_STAT_FAST_CONN_INCR(session, cache_eviction_force);
@@ -1015,7 +1057,7 @@ __wt_page_release_evict(WT_SESSION_IMPL *session, WT_REF *ref)
} else
WT_STAT_FAST_CONN_INCR(session, cache_eviction_force_fail);
- (void)WT_ATOMIC_SUB4(btree->evict_busy, 1);
+ (void)__wt_atomic_subv32(&btree->evict_busy, 1);
return (ret);
}
diff --git a/src/third_party/wiredtiger/src/include/cell.i b/src/third_party/wiredtiger/src/include/cell.i
index 77e9fa1e3c1..880e4e2402f 100644
--- a/src/third_party/wiredtiger/src/include/cell.i
+++ b/src/third_party/wiredtiger/src/include/cell.i
@@ -547,7 +547,8 @@ __wt_cell_leaf_value_parse(WT_PAGE *page, WT_CELL *cell)
* Unpack a WT_CELL into a structure during verification.
*/
static inline int
-__wt_cell_unpack_safe(WT_CELL *cell, WT_CELL_UNPACK *unpack, uint8_t *end)
+__wt_cell_unpack_safe(
+ WT_CELL *cell, WT_CELL_UNPACK *unpack, const void *start, const void *end)
{
uint64_t saved_v, v;
uint32_t saved_len;
@@ -559,14 +560,15 @@ __wt_cell_unpack_safe(WT_CELL *cell, WT_CELL_UNPACK *unpack, uint8_t *end)
saved_v = 0;
/*
- * The verification code specifies an end argument, a pointer to 1 past
- * the end-of-page. In that case, make sure we don't go past the end
- * of the page when reading. If an error occurs, we simply return the
- * error code, the verification code takes care of complaining (and, in
- * the case of salvage, it won't complain at all, it's OK to fail).
+ * The verification code specifies start/end arguments, pointers to the
+ * start of the page and to 1 past the end-of-page. In which case, make
+ * sure all reads are inside the page image. If an error occurs, return
+ * an error code but don't output messages, our caller handles that.
*/
-#define WT_CELL_LEN_CHK(p, len) do { \
- if (end != NULL && (((uint8_t *)p) + (len)) > end) \
+#define WT_CELL_LEN_CHK(t, len) do { \
+ if (start != NULL && \
+ ((uint8_t *)t < (uint8_t *)start || \
+ (((uint8_t *)t) + (len)) > (uint8_t *)end)) \
return (WT_ERROR); \
} while (0)
@@ -620,7 +622,7 @@ restart:
*/
if (cell->__chunk[0] & WT_CELL_64V) /* skip value */
WT_RET(__wt_vunpack_uint(
- &p, end == NULL ? 0 : (size_t)(end - p), &unpack->v));
+ &p, end == NULL ? 0 : WT_PTRDIFF(end, p), &unpack->v));
/*
* Handle special actions for a few different cell types and set the
@@ -637,7 +639,7 @@ restart:
* earlier cell.
*/
WT_RET(__wt_vunpack_uint(
- &p, end == NULL ? 0 : (size_t)(end - p), &v));
+ &p, end == NULL ? 0 : WT_PTRDIFF(end, p), &v));
saved_len = WT_PTRDIFF32(p, cell);
saved_v = unpack->v;
cell = (WT_CELL *)((uint8_t *)cell - v);
@@ -666,7 +668,7 @@ restart:
* data.
*/
WT_RET(__wt_vunpack_uint(
- &p, end == NULL ? 0 : (size_t)(end - p), &v));
+ &p, end == NULL ? 0 : WT_PTRDIFF(end, p), &v));
if (unpack->raw == WT_CELL_KEY ||
unpack->raw == WT_CELL_KEY_PFX ||
@@ -707,7 +709,7 @@ done: WT_CELL_LEN_CHK(cell, unpack->__len);
static inline void
__wt_cell_unpack(WT_CELL *cell, WT_CELL_UNPACK *unpack)
{
- (void)__wt_cell_unpack_safe(cell, unpack, NULL);
+ (void)__wt_cell_unpack_safe(cell, unpack, NULL, NULL);
}
/*
diff --git a/src/third_party/wiredtiger/src/include/config.h b/src/third_party/wiredtiger/src/include/config.h
index 1f30667b8d6..8cb793b799c 100644
--- a/src/third_party/wiredtiger/src/include/config.h
+++ b/src/third_party/wiredtiger/src/include/config.h
@@ -68,21 +68,22 @@ struct __wt_config_parser_impl {
#define WT_CONFIG_ENTRY_session_compact 18
#define WT_CONFIG_ENTRY_session_create 19
#define WT_CONFIG_ENTRY_session_drop 20
-#define WT_CONFIG_ENTRY_session_log_printf 21
-#define WT_CONFIG_ENTRY_session_open_cursor 22
-#define WT_CONFIG_ENTRY_session_reconfigure 23
-#define WT_CONFIG_ENTRY_session_rename 24
-#define WT_CONFIG_ENTRY_session_rollback_transaction 25
-#define WT_CONFIG_ENTRY_session_salvage 26
-#define WT_CONFIG_ENTRY_session_strerror 27
-#define WT_CONFIG_ENTRY_session_truncate 28
-#define WT_CONFIG_ENTRY_session_upgrade 29
-#define WT_CONFIG_ENTRY_session_verify 30
-#define WT_CONFIG_ENTRY_table_meta 31
-#define WT_CONFIG_ENTRY_wiredtiger_open 32
-#define WT_CONFIG_ENTRY_wiredtiger_open_all 33
-#define WT_CONFIG_ENTRY_wiredtiger_open_basecfg 34
-#define WT_CONFIG_ENTRY_wiredtiger_open_usercfg 35
+#define WT_CONFIG_ENTRY_session_log_flush 21
+#define WT_CONFIG_ENTRY_session_log_printf 22
+#define WT_CONFIG_ENTRY_session_open_cursor 23
+#define WT_CONFIG_ENTRY_session_reconfigure 24
+#define WT_CONFIG_ENTRY_session_rename 25
+#define WT_CONFIG_ENTRY_session_rollback_transaction 26
+#define WT_CONFIG_ENTRY_session_salvage 27
+#define WT_CONFIG_ENTRY_session_strerror 28
+#define WT_CONFIG_ENTRY_session_truncate 29
+#define WT_CONFIG_ENTRY_session_upgrade 30
+#define WT_CONFIG_ENTRY_session_verify 31
+#define WT_CONFIG_ENTRY_table_meta 32
+#define WT_CONFIG_ENTRY_wiredtiger_open 33
+#define WT_CONFIG_ENTRY_wiredtiger_open_all 34
+#define WT_CONFIG_ENTRY_wiredtiger_open_basecfg 35
+#define WT_CONFIG_ENTRY_wiredtiger_open_usercfg 36
/*
* configuration section: END
* DO NOT EDIT: automatically built by dist/flags.py.
diff --git a/src/third_party/wiredtiger/src/include/cursor.i b/src/third_party/wiredtiger/src/include/cursor.i
index 47b772377c0..97739c83b8f 100644
--- a/src/third_party/wiredtiger/src/include/cursor.i
+++ b/src/third_party/wiredtiger/src/include/cursor.i
@@ -150,7 +150,7 @@ __wt_cursor_dhandle_incr_use(WT_SESSION_IMPL *session)
dhandle = session->dhandle;
/* If we open a handle with a time of death set, clear it. */
- if (WT_ATOMIC_ADD4(dhandle->session_inuse, 1) == 1 &&
+ if (__wt_atomic_addi32(&dhandle->session_inuse, 1) == 1 &&
dhandle->timeofdeath != 0)
dhandle->timeofdeath = 0;
}
@@ -168,7 +168,7 @@ __wt_cursor_dhandle_decr_use(WT_SESSION_IMPL *session)
/* If we close a handle with a time of death set, clear it. */
WT_ASSERT(session, dhandle->session_inuse > 0);
- if (WT_ATOMIC_SUB4(dhandle->session_inuse, 1) == 0 &&
+ if (__wt_atomic_subi32(&dhandle->session_inuse, 1) == 0 &&
dhandle->timeofdeath != 0)
dhandle->timeofdeath = 0;
}
diff --git a/src/third_party/wiredtiger/src/include/extern.h b/src/third_party/wiredtiger/src/include/extern.h
index a11f3dcd73c..447a74d2d67 100644
--- a/src/third_party/wiredtiger/src/include/extern.h
+++ b/src/third_party/wiredtiger/src/include/extern.h
@@ -332,6 +332,7 @@ extern int __wt_log_read(WT_SESSION_IMPL *session, WT_ITEM *record, WT_LSN *lsnp
extern int __wt_log_scan(WT_SESSION_IMPL *session, WT_LSN *lsnp, uint32_t flags, int (*func)(WT_SESSION_IMPL *session, WT_ITEM *record, WT_LSN *lsnp, WT_LSN *next_lsnp, void *cookie, int firstrecord), void *cookie);
extern int __wt_log_write(WT_SESSION_IMPL *session, WT_ITEM *record, WT_LSN *lsnp, uint32_t flags);
extern int __wt_log_vprintf(WT_SESSION_IMPL *session, const char *fmt, va_list ap);
+extern int __wt_log_flush(WT_SESSION_IMPL *session, uint32_t flags);
extern int __wt_logrec_alloc(WT_SESSION_IMPL *session, size_t size, WT_ITEM **logrecp);
extern void __wt_logrec_free(WT_SESSION_IMPL *session, WT_ITEM **logrecp);
extern int __wt_logrec_read(WT_SESSION_IMPL *session, const uint8_t **pp, const uint8_t *end, uint32_t *rectypep);
diff --git a/src/third_party/wiredtiger/src/include/gcc.h b/src/third_party/wiredtiger/src/include/gcc.h
index 7135bd479c7..831dcd735d5 100644
--- a/src/third_party/wiredtiger/src/include/gcc.h
+++ b/src/third_party/wiredtiger/src/include/gcc.h
@@ -85,63 +85,71 @@
* In summary, locking > barriers > volatile.
*
* To avoid locking shared data structures such as statistics and to permit
- * atomic state changes, we rely on the WT_ATOMIC_ADD and WT_ATOMIC_CAS
- * (compare and swap) operations.
+ * atomic state changes, we rely on the atomic-add and atomic-cas (compare and
+ * swap) operations.
*/
-#define __WT_ATOMIC_ADD(v, val, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), __sync_add_and_fetch(&(v), val))
-#define __WT_ATOMIC_FETCH_ADD(v, val, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), __sync_fetch_and_add(&(v), val))
+
#ifdef __clang__
/*
- * We avoid __sync_bool_compare_and_swap with due to problems with
- * optimization with some versions of clang. See
- * http://llvm.org/bugs/show_bug.cgi?id=21499 for details.
+ * We avoid __sync_bool_compare_and_swap due to problems with optimization
+ * with some versions of clang. See http://llvm.org/bugs/show_bug.cgi?id=21499
+ * for details.
*/
-#define __WT_ATOMIC_CAS(v, old, new, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- __sync_val_compare_and_swap(&(v), old, new) == (old))
+#define WT_ATOMIC_CAS(ptr, oldval, newval) \
+ (__sync_val_compare_and_swap(ptr, oldval, newval) == oldval)
#else
-#define __WT_ATOMIC_CAS(v, old, new, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- __sync_bool_compare_and_swap(&(v), old, new))
+#define WT_ATOMIC_CAS(ptr, oldval, newval) \
+ __sync_bool_compare_and_swap(ptr, oldval, newval)
#endif
-#define __WT_ATOMIC_CAS_VAL(v, old, new, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- __sync_val_compare_and_swap(&(v), old, new))
-#define __WT_ATOMIC_STORE(v, val, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- __sync_lock_test_and_set(&(v), val))
-#define __WT_ATOMIC_SUB(v, val, n) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), __sync_sub_and_fetch(&(v), val))
-
-#define WT_ATOMIC_ADD1(v, val) __WT_ATOMIC_ADD(v, val, 1)
-#define WT_ATOMIC_FETCH_ADD1(v, val) __WT_ATOMIC_FETCH_ADD(v, val, 1)
-#define WT_ATOMIC_CAS1(v, old, new) __WT_ATOMIC_CAS(v, old, new, 1)
-#define WT_ATOMIC_CAS_VAL1(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new, 1)
-#define WT_ATOMIC_STORE1(v, val) __WT_ATOMIC_STORE(v, val, 1)
-#define WT_ATOMIC_SUB1(v, val) __WT_ATOMIC_SUB(v, val, 1)
-#define WT_ATOMIC_ADD2(v, val) __WT_ATOMIC_ADD(v, val, 2)
-#define WT_ATOMIC_FETCH_ADD2(v, val) __WT_ATOMIC_FETCH_ADD(v, val, 2)
-#define WT_ATOMIC_CAS2(v, old, new) __WT_ATOMIC_CAS(v, old, new, 2)
-#define WT_ATOMIC_CAS_VAL2(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new, 2)
-#define WT_ATOMIC_STORE2(v, val) __WT_ATOMIC_STORE(v, val, 2)
-#define WT_ATOMIC_SUB2(v, val) __WT_ATOMIC_SUB(v, val, 2)
+#define WT_ATOMIC_FUNC(name, ret, type) \
+static inline ret \
+__wt_atomic_add##name(type *vp, type v) \
+{ \
+ return (__sync_add_and_fetch(vp, v)); \
+} \
+static inline ret \
+__wt_atomic_fetch_add##name(type *vp, type v) \
+{ \
+ return (__sync_fetch_and_add(vp, v)); \
+} \
+static inline ret \
+__wt_atomic_store##name(type *vp, type v) \
+{ \
+ return (__sync_lock_test_and_set(vp, v)); \
+} \
+static inline ret \
+__wt_atomic_sub##name(type *vp, type v) \
+{ \
+ return (__sync_sub_and_fetch(vp, v)); \
+} \
+static inline int \
+__wt_atomic_cas##name(type *vp, type old, type new) \
+{ \
+ return (WT_ATOMIC_CAS(vp, old, new)); \
+}
-#define WT_ATOMIC_ADD4(v, val) __WT_ATOMIC_ADD(v, val, 4)
-#define WT_ATOMIC_FETCH_ADD4(v, val) __WT_ATOMIC_FETCH_ADD(v, val, 4)
-#define WT_ATOMIC_CAS4(v, old, new) __WT_ATOMIC_CAS(v, old, new, 4)
-#define WT_ATOMIC_CAS_VAL4(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new, 4)
-#define WT_ATOMIC_STORE4(v, val) __WT_ATOMIC_STORE(v, val, 4)
-#define WT_ATOMIC_SUB4(v, val) __WT_ATOMIC_SUB(v, val, 4)
+WT_ATOMIC_FUNC(8, uint8_t, uint8_t)
+WT_ATOMIC_FUNC(16, uint16_t, uint16_t)
+WT_ATOMIC_FUNC(32, uint32_t, uint32_t)
+WT_ATOMIC_FUNC(v32, uint32_t, volatile uint32_t)
+WT_ATOMIC_FUNC(i32, int32_t, int32_t)
+WT_ATOMIC_FUNC(iv32, int32_t, volatile int32_t)
+WT_ATOMIC_FUNC(64, uint64_t, uint64_t)
+WT_ATOMIC_FUNC(v64, uint64_t, volatile uint64_t)
+WT_ATOMIC_FUNC(i64, int64_t, int64_t)
+WT_ATOMIC_FUNC(iv64, int64_t, volatile int64_t)
+WT_ATOMIC_FUNC(size, size_t, size_t)
-#define WT_ATOMIC_ADD8(v, val) __WT_ATOMIC_ADD(v, val, 8)
-#define WT_ATOMIC_FETCH_ADD8(v, val) __WT_ATOMIC_FETCH_ADD(v, val, 8)
-#define WT_ATOMIC_CAS8(v, old, new) __WT_ATOMIC_CAS(v, old, new, 8)
-#define WT_ATOMIC_CAS_VAL8(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new, 8)
-#define WT_ATOMIC_STORE8(v, val) __WT_ATOMIC_STORE(v, val, 8)
-#define WT_ATOMIC_SUB8(v, val) __WT_ATOMIC_SUB(v, val, 8)
+/*
+ * __wt_atomic_cas_ptr --
+ * Pointer compare and swap.
+ */
+static inline int
+__wt_atomic_cas_ptr(void *vp, void *old, void *new)
+{
+ return (WT_ATOMIC_CAS((void **)vp, old, new));
+}
/* Compile read-write barrier */
#define WT_BARRIER() __asm__ volatile("" ::: "memory")
diff --git a/src/third_party/wiredtiger/src/include/hardware.h b/src/third_party/wiredtiger/src/include/hardware.h
index 1c3fb287e86..bb909df657d 100644
--- a/src/third_party/wiredtiger/src/include/hardware.h
+++ b/src/third_party/wiredtiger/src/include/hardware.h
@@ -33,8 +33,8 @@
uint8_t __orig; \
do { \
__orig = (p)->flags_atomic; \
- } while (!WT_ATOMIC_CAS1((p)->flags_atomic, \
- __orig, __orig | (uint8_t)(mask))); \
+ } while (!__wt_atomic_cas8( \
+ &(p)->flags_atomic, __orig, __orig | (uint8_t)(mask))); \
} while (0)
#define F_CAS_ATOMIC(p, mask, ret) do { \
@@ -46,16 +46,16 @@
ret = EBUSY; \
break; \
} \
- } while (!WT_ATOMIC_CAS1((p)->flags_atomic, \
- __orig, __orig | (uint8_t)(mask))); \
+ } while (!__wt_atomic_cas8( \
+ &(p)->flags_atomic, __orig, __orig | (uint8_t)(mask))); \
} while (0)
#define F_CLR_ATOMIC(p, mask) do { \
uint8_t __orig; \
do { \
__orig = (p)->flags_atomic; \
- } while (!WT_ATOMIC_CAS1((p)->flags_atomic, \
- __orig, __orig & ~(uint8_t)(mask))); \
+ } while (!__wt_atomic_cas8( \
+ &(p)->flags_atomic, __orig, __orig & ~(uint8_t)(mask))); \
} while (0)
#define WT_CACHE_LINE_ALIGNMENT 64 /* Cache line alignment */
diff --git a/src/third_party/wiredtiger/src/include/lint.h b/src/third_party/wiredtiger/src/include/lint.h
index 631f00cb5cd..eba4a1c3b3f 100644
--- a/src/third_party/wiredtiger/src/include/lint.h
+++ b/src/third_party/wiredtiger/src/include/lint.h
@@ -18,46 +18,71 @@
#define WT_GCC_FUNC_ATTRIBUTE(x)
#define WT_GCC_FUNC_DECL_ATTRIBUTE(x)
-#define __WT_ATOMIC_ADD(v, val) \
- ((v) += (val))
-#define __WT_ATOMIC_FETCH_ADD(v, val) \
- ((v) += (val), (v))
-#define __WT_ATOMIC_CAS(v, old, new) \
- ((v) = ((v) == (old) ? (new) : (old)), (v) == (old))
-#define __WT_ATOMIC_CAS_VAL(v, old, new) \
- ((v) = ((v) == (old) ? (new) : (old)), (v) == (old))
-#define __WT_ATOMIC_STORE(v, val) \
- ((v) = (val))
-#define __WT_ATOMIC_SUB(v, val) \
- ((v) -= (val), (v))
+#define WT_ATOMIC_FUNC(name, ret, type) \
+static inline ret \
+__wt_atomic_add##name(type *vp, type v) \
+{ \
+ *vp += v; \
+ return (*vp); \
+} \
+static inline ret \
+__wt_atomic_fetch_add##name(type *vp, type v) \
+{ \
+ type orig; \
+ \
+ orig = *vp; \
+ *vp += v; \
+ return (orig); \
+} \
+static inline ret \
+__wt_atomic_store##name(type *vp, type v) \
+{ \
+ type orig; \
+ \
+ orig = *vp; \
+ *vp = v; \
+ return (orig); \
+} \
+static inline ret \
+__wt_atomic_sub##name(type *vp, type v) \
+{ \
+ *vp -= v; \
+ return (*vp); \
+} \
+static inline int \
+__wt_atomic_cas##name(type *vp, type old, type new) \
+{ \
+ if (*vp == old) { \
+ *vp = new; \
+ return (1); \
+ } \
+ return (0); \
+}
-#define WT_ATOMIC_ADD1(v, val) __WT_ATOMIC_ADD(v, val)
-#define WT_ATOMIC_FETCH_ADD1(v, val) __WT_ATOMIC_FETCH_ADD(v, val)
-#define WT_ATOMIC_CAS1(v, old, new) __WT_ATOMIC_CAS(v, old, new)
-#define WT_ATOMIC_CAS_VAL1(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new)
-#define WT_ATOMIC_STORE1(v, val) __WT_ATOMIC_STORE(v, val)
-#define WT_ATOMIC_SUB1(v, val) __WT_ATOMIC_SUB(v, val)
+WT_ATOMIC_FUNC(8, uint8_t, uint8_t)
+WT_ATOMIC_FUNC(16, uint16_t, uint16_t)
+WT_ATOMIC_FUNC(32, uint32_t, uint32_t)
+WT_ATOMIC_FUNC(v32, uint32_t, volatile uint32_t)
+WT_ATOMIC_FUNC(i32, int32_t, int32_t)
+WT_ATOMIC_FUNC(iv32, int32_t, volatile int32_t)
+WT_ATOMIC_FUNC(64, uint64_t, uint64_t)
+WT_ATOMIC_FUNC(v64, uint64_t, volatile uint64_t)
+WT_ATOMIC_FUNC(i64, int64_t, int64_t)
+WT_ATOMIC_FUNC(iv64, int64_t, volatile int64_t)
+WT_ATOMIC_FUNC(size, size_t, size_t)
-#define WT_ATOMIC_ADD2(v, val) __WT_ATOMIC_ADD(v, val)
-#define WT_ATOMIC_FETCH_ADD2(v, val) __WT_ATOMIC_FETCH_ADD(v, val)
-#define WT_ATOMIC_CAS2(v, old, new) __WT_ATOMIC_CAS(v, old, new)
-#define WT_ATOMIC_CAS_VAL2(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new)
-#define WT_ATOMIC_STORE2(v, val) __WT_ATOMIC_STORE(v, val)
-#define WT_ATOMIC_SUB2(v, val) __WT_ATOMIC_SUB(v, val)
-
-#define WT_ATOMIC_ADD4(v, val) __WT_ATOMIC_ADD(v, val)
-#define WT_ATOMIC_FETCH_ADD4(v, val) __WT_ATOMIC_FETCH_ADD(v, val)
-#define WT_ATOMIC_CAS4(v, old, new) __WT_ATOMIC_CAS(v, old, new)
-#define WT_ATOMIC_CAS_VAL4(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new)
-#define WT_ATOMIC_STORE4(v, val) __WT_ATOMIC_STORE(v, val)
-#define WT_ATOMIC_SUB4(v, val) __WT_ATOMIC_SUB(v, val)
-
-#define WT_ATOMIC_ADD8(v, val) __WT_ATOMIC_ADD(v, val)
-#define WT_ATOMIC_FETCH_ADD8(v, val) __WT_ATOMIC_FETCH_ADD(v, val)
-#define WT_ATOMIC_CAS8(v, old, new) __WT_ATOMIC_CAS(v, old, new)
-#define WT_ATOMIC_CAS_VAL8(v, old, new) __WT_ATOMIC_CAS_VAL(v, old, new)
-#define WT_ATOMIC_STORE8(v, val) __WT_ATOMIC_STORE(v, val)
-#define WT_ATOMIC_SUB8(v, val) __WT_ATOMIC_SUB(v, val)
+/*
+ * __wt_atomic_cas_ptr --
+ * Pointer compare and swap.
+ */
+static inline int
+__wt_atomic_cas_ptr(void *vp, void *old, void *new) {
+ if (*(void **)vp == old) {
+ *(void **)vp = new;
+ return (1);
+ }
+ return (0);
+}
static inline void WT_BARRIER(void) { return; }
static inline void WT_FULL_BARRIER(void) { return; }
diff --git a/src/third_party/wiredtiger/src/include/lsm.h b/src/third_party/wiredtiger/src/include/lsm.h
index dc6a0d7e027..d85d2aff812 100644
--- a/src/third_party/wiredtiger/src/include/lsm.h
+++ b/src/third_party/wiredtiger/src/include/lsm.h
@@ -171,11 +171,11 @@ struct __wt_lsm_tree {
const char *collator_name;
int collator_owned;
- int refcnt; /* Number of users of the tree */
- int8_t exclusive; /* Tree is locked exclusively */
+ uint32_t refcnt; /* Number of users of the tree */
+ uint8_t exclusive; /* Tree is locked exclusively */
#define LSM_TREE_MAX_QUEUE 100
- int queue_ref;
+ uint32_t queue_ref;
WT_RWLOCK *rwlock;
TAILQ_ENTRY(__wt_lsm_tree) q;
@@ -215,7 +215,7 @@ struct __wt_lsm_tree {
WT_LSM_CHUNK **old_chunks; /* Array of old LSM chunks */
size_t old_alloc; /* Space allocated for old chunks */
u_int nold_chunks; /* Number of old chunks */
- int freeing_old_chunks; /* Whether chunks are being freed */
+ uint32_t freeing_old_chunks; /* Whether chunks are being freed */
uint32_t merge_aggressiveness; /* Increase amount of work per merge */
#define WT_LSM_TREE_ACTIVE 0x01 /* Workers are active */
diff --git a/src/third_party/wiredtiger/src/include/msvc.h b/src/third_party/wiredtiger/src/include/msvc.h
index fa5b2d848e8..f4d8ba52fc1 100644
--- a/src/third_party/wiredtiger/src/include/msvc.h
+++ b/src/third_party/wiredtiger/src/include/msvc.h
@@ -31,63 +31,56 @@
#define WT_GCC_FUNC_ATTRIBUTE(x)
#define WT_GCC_FUNC_DECL_ATTRIBUTE(x)
-#define __WT_ATOMIC_ADD(v, val, n, s, t) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- _InterlockedExchangeAdd ## s((t*)&(v), (t)(val)) + (val))
-#define __WT_ATOMIC_FETCH_ADD(v, val, n, s, t) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- _InterlockedExchangeAdd ## s((t*)&(v), (t)(val)))
-#define __WT_ATOMIC_CAS(v, old, new, n, s, t) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- _InterlockedCompareExchange ## s \
- ((t*)&(v), (t)(new), (t)(old)) == (t)(old))
-#define __WT_ATOMIC_CAS_VAL(v, old, new, n, s, t) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- _InterlockedCompareExchange ## s((t*)&(v), (t)(new), (t)(old)))
-#define __WT_ATOMIC_STORE(v, val, n, s, t) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- _InterlockedExchange ## s((t*)&(v), (t)(val)))
-#define __WT_ATOMIC_SUB(v, val, n, s, t) \
- (WT_STATIC_ASSERT(sizeof(v) == (n)), \
- _InterlockedExchangeAdd ## s((t*)&(v), -(t) val) - (val))
+#define WT_ATOMIC_FUNC(name, ret, type, s, t) \
+static inline ret \
+__wt_atomic_add##name(type *vp, type v) \
+{ \
+ return (_InterlockedExchangeAdd ## s((t *)(vp), (t)(v)) + (v)); \
+} \
+static inline ret \
+__wt_atomic_fetch_add##name(type *vp, type v) \
+{ \
+ return (_InterlockedExchangeAdd ## s((t *)(vp), (t)(v))); \
+} \
+static inline ret \
+__wt_atomic_store##name(type *vp, type v) \
+{ \
+ return (_InterlockedExchange ## s((t *)(vp), (t)(v))); \
+} \
+static inline ret \
+__wt_atomic_sub##name(type *vp, type v) \
+{ \
+ return (_InterlockedExchangeAdd ## s((t *)(vp), - (t)v) - (v)); \
+} \
+static inline int \
+__wt_atomic_cas##name(type *vp, type old, type new) \
+{ \
+ return (_InterlockedCompareExchange ## s \
+ ((t *)(vp), (t)(new), (t)(old)) == (t)(old)); \
+}
-#define WT_ATOMIC_ADD1(v, val) __WT_ATOMIC_ADD(v, val, 1, 8, char)
-#define WT_ATOMIC_FETCH_ADD1(v, val) \
- __WT_ATOMIC_FETCH_ADD(v, val, 1, 8, char)
-#define WT_ATOMIC_CAS1(v, old, new) __WT_ATOMIC_CAS(v, old, new, 1, 8, char)
-#define WT_ATOMIC_CAS_VAL1(v, old, new) \
- __WT_ATOMIC_CAS_VAL(v, old, new, 1, 8, char)
-#define WT_ATOMIC_STORE1(v, val) __WT_ATOMIC_STORE(v, val, 1, 8, char)
-#define WT_ATOMIC_SUB1(v, val) __WT_ATOMIC_SUB(v, val, 1, 8, char)
+WT_ATOMIC_FUNC(8, uint8_t, uint8_t, 8, char)
+WT_ATOMIC_FUNC(16, uint16_t, uint16_t, 16, short)
+WT_ATOMIC_FUNC(32, uint32_t, uint32_t, , long)
+WT_ATOMIC_FUNC(v32, uint32_t, volatile uint32_t, , long)
+WT_ATOMIC_FUNC(i32, int32_t, int32_t, , long)
+WT_ATOMIC_FUNC(iv32, int32_t, volatile int32_t, , long)
+WT_ATOMIC_FUNC(64, uint64_t, uint64_t, 64, __int64)
+WT_ATOMIC_FUNC(v64, uint64_t, volatile uint64_t, 64, __int64)
+WT_ATOMIC_FUNC(i64, int64_t, int64_t, 64, __int64)
+WT_ATOMIC_FUNC(iv64, int64_t, volatile int64_t, 64, __int64)
+WT_ATOMIC_FUNC(size, size_t, size_t, 64, __int64)
-#define WT_ATOMIC_ADD2(v, val) __WT_ATOMIC_ADD(v, val, 2, 16, short)
-#define WT_ATOMIC_FETCH_ADD2(v, val) \
- __WT_ATOMIC_FETCH_ADD(v, val, 2, 16, short)
-#define WT_ATOMIC_CAS2(v, old, new) \
- __WT_ATOMIC_CAS(v, old, new, 2, 16, short)
-#define WT_ATOMIC_CAS_VAL2(v, old, new) \
- __WT_ATOMIC_CAS_VAL(v, old, new, 2, 16, short)
-#define WT_ATOMIC_STORE2(v, val) __WT_ATOMIC_STORE(v, val, 2, 16, short)
-#define WT_ATOMIC_SUB2(v, val) __WT_ATOMIC_SUB(v, val, 2, 16, short)
-
-#define WT_ATOMIC_ADD4(v, val) __WT_ATOMIC_ADD(v, val, 4, , long)
-#define WT_ATOMIC_FETCH_ADD4(v, val) __WT_ATOMIC_FETCH_ADD(v, val, 4, , long)
-#define WT_ATOMIC_CAS4(v, old, new) __WT_ATOMIC_CAS(v, old, new, 4, , long)
-#define WT_ATOMIC_CAS_VAL4(v, old, new) \
- __WT_ATOMIC_CAS_VAL(v, old, new, 4, , long)
-#define WT_ATOMIC_STORE4(v, val) __WT_ATOMIC_STORE(v, val, 4, , long)
-#define WT_ATOMIC_SUB4(v, val) __WT_ATOMIC_SUB(v, val, 4, , long)
-
-#define WT_ATOMIC_ADD8(v, val) __WT_ATOMIC_ADD(v, val, 8, 64, __int64)
-#define WT_ATOMIC_FETCH_ADD8(v, val) \
- __WT_ATOMIC_FETCH_ADD(v, val, 8, 64, __int64)
-#define WT_ATOMIC_CAS8(v, old, new) \
- __WT_ATOMIC_CAS(v, old, new, 8, 64, __int64)
-#define WT_ATOMIC_CAS_VAL8(v, old, new) \
- __WT_ATOMIC_CAS_VAL(v, old, new, 8, 64, __int64)
-#define WT_ATOMIC_STORE8(v, val) \
- __WT_ATOMIC_STORE(v, val, 8, 64, __int64)
-#define WT_ATOMIC_SUB8(v, val) __WT_ATOMIC_SUB(v, val, 8, 64, __int64)
+/*
+ * __wt_atomic_cas_ptr --
+ * Pointer compare and swap.
+ */
+static inline int
+__wt_atomic_cas_ptr(void *vp, void *old, void *new)
+{
+ return (_InterlockedCompareExchange64(
+ vp, (int64_t)new, (int64_t)old) == ((int64_t)old));
+}
static inline void WT_BARRIER(void) { _ReadWriteBarrier(); }
static inline void WT_FULL_BARRIER(void) { _mm_mfence(); }
diff --git a/src/third_party/wiredtiger/src/include/serial.i b/src/third_party/wiredtiger/src/include/serial.i
index 0fc23348800..7b62e66eccb 100644
--- a/src/third_party/wiredtiger/src/include/serial.i
+++ b/src/third_party/wiredtiger/src/include/serial.i
@@ -56,7 +56,7 @@ __insert_simple_func(WT_SESSION_IMPL *session,
for (i = 0; i < skipdepth; i++) {
WT_INSERT *old_ins = *ins_stack[i];
if (old_ins != new_ins->next[i] ||
- !WT_ATOMIC_CAS8(*ins_stack[i], old_ins, new_ins))
+ !__wt_atomic_cas_ptr(ins_stack[i], old_ins, new_ins))
return (i == 0 ? WT_RESTART : 0);
}
@@ -93,7 +93,7 @@ __insert_serial_func(WT_SESSION_IMPL *session, WT_INSERT_HEAD *ins_head,
for (i = 0; i < skipdepth; i++) {
WT_INSERT *old_ins = *ins_stack[i];
if (old_ins != new_ins->next[i] ||
- !WT_ATOMIC_CAS8(*ins_stack[i], old_ins, new_ins))
+ !__wt_atomic_cas_ptr(ins_stack[i], old_ins, new_ins))
return (i == 0 ? WT_RESTART : 0);
if (ins_head->tail[i] == NULL ||
ins_stack[i] == &ins_head->tail[i]->next[i])
@@ -271,7 +271,7 @@ __wt_update_serial(WT_SESSION_IMPL *session, WT_PAGE *page,
* Swap the update into place. If that fails, a new update was added
* after our search, we raced. Check if our update is still permitted.
*/
- while (!WT_ATOMIC_CAS8(*srch_upd, upd->next, upd)) {
+ while (!__wt_atomic_cas_ptr(srch_upd, upd->next, upd)) {
if ((ret = __wt_txn_update_check(
session, upd->next = *srch_upd)) != 0) {
/* Free unused memory on error. */
diff --git a/src/third_party/wiredtiger/src/include/stat.h b/src/third_party/wiredtiger/src/include/stat.h
index aab251f0e31..64d5c4bc71e 100644
--- a/src/third_party/wiredtiger/src/include/stat.h
+++ b/src/third_party/wiredtiger/src/include/stat.h
@@ -17,11 +17,11 @@ struct __wt_stats {
#define WT_STAT(stats, fld) \
((stats)->fld.v)
#define WT_STAT_ATOMIC_DECRV(stats, fld, value) do { \
- (void)WT_ATOMIC_SUB8(WT_STAT(stats, fld), (value)); \
+ (void)__wt_atomic_sub64(&WT_STAT(stats, fld), (value)); \
} while (0)
#define WT_STAT_ATOMIC_DECR(stats, fld) WT_STAT_ATOMIC_DECRV(stats, fld, 1)
#define WT_STAT_ATOMIC_INCRV(stats, fld, value) do { \
- (void)WT_ATOMIC_ADD8(WT_STAT(stats, fld), (value)); \
+ (void)__wt_atomic_add64(&WT_STAT(stats, fld), (value)); \
} while (0)
-#define WT_STAT_ATOMIC_INCR(stats, fld) WT_ATOMIC_ADD8(WT_STAT(stats, fld), 1)
+#define WT_STAT_ATOMIC_INCR(stats, fld) WT_STAT_ATOMIC_INCRV(stats, fld, 1)
#define WT_STAT_DECRV(stats, fld, value) do { \
@@ -214,6 +214,7 @@ struct __wt_connection_stats {
WT_STATS log_compress_small;
WT_STATS log_compress_write_fails;
WT_STATS log_compress_writes;
+ WT_STATS log_flush;
WT_STATS log_max_filesize;
WT_STATS log_prealloc_files;
WT_STATS log_prealloc_max;
diff --git a/src/third_party/wiredtiger/src/include/txn.i b/src/third_party/wiredtiger/src/include/txn.i
index 95a8f99cf1b..a262672630f 100644
--- a/src/third_party/wiredtiger/src/include/txn.i
+++ b/src/third_party/wiredtiger/src/include/txn.i
@@ -284,7 +284,7 @@ __wt_txn_new_id(WT_SESSION_IMPL *session)
* global current ID, so we want post-increment semantics. Our atomic
* add primitive does pre-increment, so adjust the result here.
*/
- return (WT_ATOMIC_ADD8(S2C(session)->txn_global.current, 1) - 1);
+ return (__wt_atomic_addv64(&S2C(session)->txn_global.current, 1) - 1);
}
/*
@@ -360,8 +360,9 @@ __wt_txn_id_check(WT_SESSION_IMPL *session)
*/
do {
txn_state->id = txn->id = txn_global->current;
- } while (!WT_ATOMIC_CAS8(
- txn_global->current, txn->id, txn->id + 1));
+ } while (!__wt_atomic_casv64(
+ &txn_global->current, txn->id, txn->id + 1) ||
+ TXNID_LT(txn->id, txn_global->last_running));
/*
* If we have used 64-bits of transaction IDs, there is nothing
diff --git a/src/third_party/wiredtiger/src/include/wiredtiger.in b/src/third_party/wiredtiger/src/include/wiredtiger.in
index ab03d2d27ad..2667e02c5b6 100644
--- a/src/third_party/wiredtiger/src/include/wiredtiger.in
+++ b/src/third_party/wiredtiger/src/include/wiredtiger.in
@@ -1213,6 +1213,16 @@ struct __wt_session {
const char *name, const char *config);
/*!
+ * Flush the log.
+ *
+ * @param session the session handle
+ * @configstart{WT_SESSION.log_flush, see dist/api_data.py}
+ * @configend
+ * @errors
+ */
+ int __F(log_flush)(WT_SESSION *session, const char *config);
+
+ /*!
* Insert a ::WT_LOGREC_MESSAGE type record in the database log files
* (the database must be configured for logging when this method is
* called).
@@ -3369,130 +3379,132 @@ extern int wiredtiger_extension_terminate(WT_CONNECTION *connection);
#define WT_STAT_CONN_LOG_COMPRESS_WRITE_FAILS 1080
/*! log: log records compressed */
#define WT_STAT_CONN_LOG_COMPRESS_WRITES 1081
+/*! log: log flush operations */
+#define WT_STAT_CONN_LOG_FLUSH 1082
/*! log: maximum log file size */
-#define WT_STAT_CONN_LOG_MAX_FILESIZE 1082
+#define WT_STAT_CONN_LOG_MAX_FILESIZE 1083
/*! log: pre-allocated log files prepared */
-#define WT_STAT_CONN_LOG_PREALLOC_FILES 1083
+#define WT_STAT_CONN_LOG_PREALLOC_FILES 1084
/*! log: number of pre-allocated log files to create */
-#define WT_STAT_CONN_LOG_PREALLOC_MAX 1084
+#define WT_STAT_CONN_LOG_PREALLOC_MAX 1085
/*! log: pre-allocated log files used */
-#define WT_STAT_CONN_LOG_PREALLOC_USED 1085
+#define WT_STAT_CONN_LOG_PREALLOC_USED 1086
/*! log: log read operations */
-#define WT_STAT_CONN_LOG_READS 1086
+#define WT_STAT_CONN_LOG_READS 1087
/*! log: log release advances write LSN */
-#define WT_STAT_CONN_LOG_RELEASE_WRITE_LSN 1087
+#define WT_STAT_CONN_LOG_RELEASE_WRITE_LSN 1088
/*! log: records processed by log scan */
-#define WT_STAT_CONN_LOG_SCAN_RECORDS 1088
+#define WT_STAT_CONN_LOG_SCAN_RECORDS 1089
/*! log: log scan records requiring two reads */
-#define WT_STAT_CONN_LOG_SCAN_REREADS 1089
+#define WT_STAT_CONN_LOG_SCAN_REREADS 1090
/*! log: log scan operations */
-#define WT_STAT_CONN_LOG_SCANS 1090
+#define WT_STAT_CONN_LOG_SCANS 1091
/*! log: consolidated slot closures */
-#define WT_STAT_CONN_LOG_SLOT_CLOSES 1091
+#define WT_STAT_CONN_LOG_SLOT_CLOSES 1092
/*! log: logging bytes consolidated */
-#define WT_STAT_CONN_LOG_SLOT_CONSOLIDATED 1092
+#define WT_STAT_CONN_LOG_SLOT_CONSOLIDATED 1093
/*! log: consolidated slot joins */
-#define WT_STAT_CONN_LOG_SLOT_JOINS 1093
+#define WT_STAT_CONN_LOG_SLOT_JOINS 1094
/*! log: consolidated slot join races */
-#define WT_STAT_CONN_LOG_SLOT_RACES 1094
+#define WT_STAT_CONN_LOG_SLOT_RACES 1095
/*! log: slots selected for switching that were unavailable */
-#define WT_STAT_CONN_LOG_SLOT_SWITCH_FAILS 1095
+#define WT_STAT_CONN_LOG_SLOT_SWITCH_FAILS 1096
/*! log: record size exceeded maximum */
-#define WT_STAT_CONN_LOG_SLOT_TOOBIG 1096
+#define WT_STAT_CONN_LOG_SLOT_TOOBIG 1097
/*! log: failed to find a slot large enough for record */
-#define WT_STAT_CONN_LOG_SLOT_TOOSMALL 1097
+#define WT_STAT_CONN_LOG_SLOT_TOOSMALL 1098
/*! log: consolidated slot join transitions */
-#define WT_STAT_CONN_LOG_SLOT_TRANSITIONS 1098
+#define WT_STAT_CONN_LOG_SLOT_TRANSITIONS 1099
/*! log: log sync operations */
-#define WT_STAT_CONN_LOG_SYNC 1099
+#define WT_STAT_CONN_LOG_SYNC 1100
/*! log: log sync_dir operations */
-#define WT_STAT_CONN_LOG_SYNC_DIR 1100
+#define WT_STAT_CONN_LOG_SYNC_DIR 1101
/*! log: log server thread advances write LSN */
-#define WT_STAT_CONN_LOG_WRITE_LSN 1101
+#define WT_STAT_CONN_LOG_WRITE_LSN 1102
/*! log: log write operations */
-#define WT_STAT_CONN_LOG_WRITES 1102
+#define WT_STAT_CONN_LOG_WRITES 1103
/*! LSM: sleep for LSM checkpoint throttle */
-#define WT_STAT_CONN_LSM_CHECKPOINT_THROTTLE 1103
+#define WT_STAT_CONN_LSM_CHECKPOINT_THROTTLE 1104
/*! LSM: sleep for LSM merge throttle */
-#define WT_STAT_CONN_LSM_MERGE_THROTTLE 1104
+#define WT_STAT_CONN_LSM_MERGE_THROTTLE 1105
/*! LSM: rows merged in an LSM tree */
-#define WT_STAT_CONN_LSM_ROWS_MERGED 1105
+#define WT_STAT_CONN_LSM_ROWS_MERGED 1106
/*! LSM: application work units currently queued */
-#define WT_STAT_CONN_LSM_WORK_QUEUE_APP 1106
+#define WT_STAT_CONN_LSM_WORK_QUEUE_APP 1107
/*! LSM: merge work units currently queued */
-#define WT_STAT_CONN_LSM_WORK_QUEUE_MANAGER 1107
+#define WT_STAT_CONN_LSM_WORK_QUEUE_MANAGER 1108
/*! LSM: tree queue hit maximum */
-#define WT_STAT_CONN_LSM_WORK_QUEUE_MAX 1108
+#define WT_STAT_CONN_LSM_WORK_QUEUE_MAX 1109
/*! LSM: switch work units currently queued */
-#define WT_STAT_CONN_LSM_WORK_QUEUE_SWITCH 1109
+#define WT_STAT_CONN_LSM_WORK_QUEUE_SWITCH 1110
/*! LSM: tree maintenance operations scheduled */
-#define WT_STAT_CONN_LSM_WORK_UNITS_CREATED 1110
+#define WT_STAT_CONN_LSM_WORK_UNITS_CREATED 1111
/*! LSM: tree maintenance operations discarded */
-#define WT_STAT_CONN_LSM_WORK_UNITS_DISCARDED 1111
+#define WT_STAT_CONN_LSM_WORK_UNITS_DISCARDED 1112
/*! LSM: tree maintenance operations executed */
-#define WT_STAT_CONN_LSM_WORK_UNITS_DONE 1112
+#define WT_STAT_CONN_LSM_WORK_UNITS_DONE 1113
/*! connection: memory allocations */
-#define WT_STAT_CONN_MEMORY_ALLOCATION 1113
+#define WT_STAT_CONN_MEMORY_ALLOCATION 1114
/*! connection: memory frees */
-#define WT_STAT_CONN_MEMORY_FREE 1114
+#define WT_STAT_CONN_MEMORY_FREE 1115
/*! connection: memory re-allocations */
-#define WT_STAT_CONN_MEMORY_GROW 1115
+#define WT_STAT_CONN_MEMORY_GROW 1116
/*! thread-yield: page acquire busy blocked */
-#define WT_STAT_CONN_PAGE_BUSY_BLOCKED 1116
+#define WT_STAT_CONN_PAGE_BUSY_BLOCKED 1117
/*! thread-yield: page acquire eviction blocked */
-#define WT_STAT_CONN_PAGE_FORCIBLE_EVICT_BLOCKED 1117
+#define WT_STAT_CONN_PAGE_FORCIBLE_EVICT_BLOCKED 1118
/*! thread-yield: page acquire locked blocked */
-#define WT_STAT_CONN_PAGE_LOCKED_BLOCKED 1118
+#define WT_STAT_CONN_PAGE_LOCKED_BLOCKED 1119
/*! thread-yield: page acquire read blocked */
-#define WT_STAT_CONN_PAGE_READ_BLOCKED 1119
+#define WT_STAT_CONN_PAGE_READ_BLOCKED 1120
/*! thread-yield: page acquire time sleeping (usecs) */
-#define WT_STAT_CONN_PAGE_SLEEP 1120
+#define WT_STAT_CONN_PAGE_SLEEP 1121
/*! connection: total read I/Os */
-#define WT_STAT_CONN_READ_IO 1121
+#define WT_STAT_CONN_READ_IO 1122
/*! reconciliation: page reconciliation calls */
-#define WT_STAT_CONN_REC_PAGES 1122
+#define WT_STAT_CONN_REC_PAGES 1123
/*! reconciliation: page reconciliation calls for eviction */
-#define WT_STAT_CONN_REC_PAGES_EVICTION 1123
+#define WT_STAT_CONN_REC_PAGES_EVICTION 1124
/*! reconciliation: split bytes currently awaiting free */
-#define WT_STAT_CONN_REC_SPLIT_STASHED_BYTES 1124
+#define WT_STAT_CONN_REC_SPLIT_STASHED_BYTES 1125
/*! reconciliation: split objects currently awaiting free */
-#define WT_STAT_CONN_REC_SPLIT_STASHED_OBJECTS 1125
+#define WT_STAT_CONN_REC_SPLIT_STASHED_OBJECTS 1126
/*! connection: pthread mutex shared lock read-lock calls */
-#define WT_STAT_CONN_RWLOCK_READ 1126
+#define WT_STAT_CONN_RWLOCK_READ 1127
/*! connection: pthread mutex shared lock write-lock calls */
-#define WT_STAT_CONN_RWLOCK_WRITE 1127
+#define WT_STAT_CONN_RWLOCK_WRITE 1128
/*! session: open cursor count */
-#define WT_STAT_CONN_SESSION_CURSOR_OPEN 1128
+#define WT_STAT_CONN_SESSION_CURSOR_OPEN 1129
/*! session: open session count */
-#define WT_STAT_CONN_SESSION_OPEN 1129
+#define WT_STAT_CONN_SESSION_OPEN 1130
/*! transaction: transaction begins */
-#define WT_STAT_CONN_TXN_BEGIN 1130
+#define WT_STAT_CONN_TXN_BEGIN 1131
/*! transaction: transaction checkpoints */
-#define WT_STAT_CONN_TXN_CHECKPOINT 1131
+#define WT_STAT_CONN_TXN_CHECKPOINT 1132
/*! transaction: transaction checkpoint generation */
-#define WT_STAT_CONN_TXN_CHECKPOINT_GENERATION 1132
+#define WT_STAT_CONN_TXN_CHECKPOINT_GENERATION 1133
/*! transaction: transaction checkpoint currently running */
-#define WT_STAT_CONN_TXN_CHECKPOINT_RUNNING 1133
+#define WT_STAT_CONN_TXN_CHECKPOINT_RUNNING 1134
/*! transaction: transaction checkpoint max time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_MAX 1134
+#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_MAX 1135
/*! transaction: transaction checkpoint min time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_MIN 1135
+#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_MIN 1136
/*! transaction: transaction checkpoint most recent time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_RECENT 1136
+#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_RECENT 1137
/*! transaction: transaction checkpoint total time (msecs) */
-#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_TOTAL 1137
+#define WT_STAT_CONN_TXN_CHECKPOINT_TIME_TOTAL 1138
/*! transaction: transactions committed */
-#define WT_STAT_CONN_TXN_COMMIT 1138
+#define WT_STAT_CONN_TXN_COMMIT 1139
/*! transaction: transaction failures due to cache overflow */
-#define WT_STAT_CONN_TXN_FAIL_CACHE 1139
+#define WT_STAT_CONN_TXN_FAIL_CACHE 1140
/*! transaction: transaction range of IDs currently pinned by a checkpoint */
-#define WT_STAT_CONN_TXN_PINNED_CHECKPOINT_RANGE 1140
+#define WT_STAT_CONN_TXN_PINNED_CHECKPOINT_RANGE 1141
/*! transaction: transaction range of IDs currently pinned */
-#define WT_STAT_CONN_TXN_PINNED_RANGE 1141
+#define WT_STAT_CONN_TXN_PINNED_RANGE 1142
/*! transaction: transactions rolled back */
-#define WT_STAT_CONN_TXN_ROLLBACK 1142
+#define WT_STAT_CONN_TXN_ROLLBACK 1143
/*! connection: total write I/Os */
-#define WT_STAT_CONN_WRITE_IO 1143
+#define WT_STAT_CONN_WRITE_IO 1144
/*!
* @}
diff --git a/src/third_party/wiredtiger/src/log/log.c b/src/third_party/wiredtiger/src/log/log.c
index 76cf5f55f7b..567200ffa0b 100644
--- a/src/third_party/wiredtiger/src/log/log.c
+++ b/src/third_party/wiredtiger/src/log/log.c
@@ -1920,3 +1920,36 @@ __wt_log_vprintf(WT_SESSION_IMPL *session, const char *fmt, va_list ap)
err: __wt_scr_free(session, &logrec);
return (ret);
}
+
+/*
+ * __wt_log_flush --
+ * Forcibly flush the log to the synchronization level specified.
+ * Wait until it has been completed.
+ */
+int
+__wt_log_flush(WT_SESSION_IMPL *session, uint32_t flags)
+{
+ WT_CONNECTION_IMPL *conn;
+ WT_LOG *log;
+ WT_LSN last_lsn, lsn;
+
+ conn = S2C(session);
+ WT_ASSERT(session, FLD_ISSET(conn->log_flags, WT_CONN_LOG_ENABLED));
+ log = conn->log;
+ last_lsn = log->alloc_lsn;
+ lsn = log->write_lsn;
+ /*
+ * Wait until all current outstanding writes have been written
+ * to the file system.
+ */
+ while (LOG_CMP(&last_lsn, &lsn) > 0) {
+ __wt_yield();
+ lsn = log->write_lsn;
+ }
+ /*
+ * If the user wants sync, force it now.
+ */
+ if (LF_ISSET(WT_LOG_FSYNC))
+ WT_RET(__wt_log_force_sync(session, &lsn));
+ return (0);
+}
diff --git a/src/third_party/wiredtiger/src/log/log_slot.c b/src/third_party/wiredtiger/src/log/log_slot.c
index a08a9aff001..b6b5f6bc896 100644
--- a/src/third_party/wiredtiger/src/log/log_slot.c
+++ b/src/third_party/wiredtiger/src/log/log_slot.c
@@ -104,7 +104,7 @@ __wt_log_slot_join(WT_SESSION_IMPL *session, uint64_t mysize,
WT_CONNECTION_IMPL *conn;
WT_LOG *log;
WT_LOGSLOT *slot;
- int64_t cur_state, new_state, old_state;
+ int64_t new_state, old_state;
uint32_t allocated_slot, slot_attempts;
conn = S2C(session);
@@ -118,8 +118,8 @@ __wt_log_slot_join(WT_SESSION_IMPL *session, uint64_t mysize,
find_slot:
allocated_slot = __wt_random(&session->rnd) % WT_SLOT_ACTIVE;
slot = log->slot_array[allocated_slot];
- old_state = slot->slot_state;
join_slot:
+ old_state = slot->slot_state;
/*
* WT_LOG_SLOT_READY and higher means the slot is available for
* joining. Any other state means it is in use and transitioning
@@ -150,13 +150,11 @@ join_slot:
}
goto find_slot;
}
- cur_state = WT_ATOMIC_CAS_VAL8(slot->slot_state, old_state, new_state);
/*
* We lost a race to add our size into this slot. Check the state
* and try again.
*/
- if (cur_state != old_state) {
- old_state = cur_state;
+ if (!__wt_atomic_casiv64(&slot->slot_state, old_state, new_state)) {
WT_STAT_FAST_CONN_INCR(session, log_slot_races);
goto join_slot;
}
@@ -232,7 +230,8 @@ retry:
newslot->slot_state = WT_LOG_SLOT_READY;
newslot->slot_index = slot->slot_index;
log->slot_array[newslot->slot_index] = &log->slot_pool[pool_i];
- old_state = WT_ATOMIC_STORE8(slot->slot_state, WT_LOG_SLOT_PENDING);
+ old_state =
+ __wt_atomic_storeiv64(&slot->slot_state, WT_LOG_SLOT_PENDING);
slot->slot_group_size = (uint64_t)(old_state - WT_LOG_SLOT_READY);
/*
* Note that this statistic may be much bigger than in reality,
@@ -288,14 +287,11 @@ __wt_log_slot_wait(WT_SESSION_IMPL *session, WT_LOGSLOT *slot)
int64_t
__wt_log_slot_release(WT_LOGSLOT *slot, uint64_t size)
{
- int64_t newsize;
-
/*
* Add my size into the state. When it reaches WT_LOG_SLOT_DONE
* all participatory threads have completed copying their piece.
*/
- newsize = WT_ATOMIC_ADD8(slot->slot_state, (int64_t)size);
- return (newsize);
+ return (__wt_atomic_addiv64(&slot->slot_state, (int64_t)size));
}
/*
diff --git a/src/third_party/wiredtiger/src/lsm/lsm_cursor_bulk.c b/src/third_party/wiredtiger/src/lsm/lsm_cursor_bulk.c
index 8099c87c3bf..d67eb33c9e8 100644
--- a/src/third_party/wiredtiger/src/lsm/lsm_cursor_bulk.c
+++ b/src/third_party/wiredtiger/src/lsm/lsm_cursor_bulk.c
@@ -128,4 +128,3 @@ __wt_clsm_open_bulk(WT_CURSOR_LSM *clsm, const char *cfg[])
return (0);
}
-
diff --git a/src/third_party/wiredtiger/src/lsm/lsm_manager.c b/src/third_party/wiredtiger/src/lsm/lsm_manager.c
index 1ea41f24ee2..0e22af3b623 100644
--- a/src/third_party/wiredtiger/src/lsm/lsm_manager.c
+++ b/src/third_party/wiredtiger/src/lsm/lsm_manager.c
@@ -259,7 +259,7 @@ __wt_lsm_manager_free_work_unit(
if (entry != NULL) {
WT_ASSERT(session, entry->lsm_tree->queue_ref > 0);
- (void)WT_ATOMIC_SUB4(entry->lsm_tree->queue_ref, 1);
+ (void)__wt_atomic_sub32(&entry->lsm_tree->queue_ref, 1);
__wt_free(session, entry);
}
}
@@ -673,7 +673,7 @@ __wt_lsm_manager_push_entry(WT_SESSION_IMPL *session,
entry->type = type;
entry->flags = flags;
entry->lsm_tree = lsm_tree;
- (void)WT_ATOMIC_ADD4(lsm_tree->queue_ref, 1);
+ (void)__wt_atomic_add32(&lsm_tree->queue_ref, 1);
WT_STAT_FAST_CONN_INCR(session, lsm_work_units_created);
if (type == WT_LSM_WORK_SWITCH)
diff --git a/src/third_party/wiredtiger/src/lsm/lsm_merge.c b/src/third_party/wiredtiger/src/lsm/lsm_merge.c
index 6ca1b0f04ab..de7ea37e498 100644
--- a/src/third_party/wiredtiger/src/lsm/lsm_merge.c
+++ b/src/third_party/wiredtiger/src/lsm/lsm_merge.c
@@ -284,7 +284,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
locked = 0;
/* Allocate an ID for the merge. */
- dest_id = WT_ATOMIC_ADD4(lsm_tree->last, 1);
+ dest_id = __wt_atomic_add32(&lsm_tree->last, 1);
/*
* We only want to do the chunk loop if we're running with verbose,
@@ -375,7 +375,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
* merge_syncing field so that compact knows it is still in
* progress.
*/
- (void)WT_ATOMIC_ADD4(lsm_tree->merge_syncing, 1);
+ (void)__wt_atomic_add32(&lsm_tree->merge_syncing, 1);
in_sync = 1;
/*
* We've successfully created the new chunk. Now install it. We need
@@ -426,7 +426,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
WT_TRET(dest->close(dest));
dest = NULL;
++lsm_tree->merge_progressing;
- (void)WT_ATOMIC_SUB4(lsm_tree->merge_syncing, 1);
+ (void)__wt_atomic_sub32(&lsm_tree->merge_syncing, 1);
in_sync = 0;
WT_ERR_NOTFOUND_OK(ret);
@@ -482,7 +482,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
err: if (locked)
WT_TRET(__wt_lsm_tree_writeunlock(session, lsm_tree));
if (in_sync)
- (void)WT_ATOMIC_SUB4(lsm_tree->merge_syncing, 1);
+ (void)__wt_atomic_sub32(&lsm_tree->merge_syncing, 1);
if (src != NULL)
WT_TRET(src->close(src));
if (dest != NULL)
diff --git a/src/third_party/wiredtiger/src/lsm/lsm_tree.c b/src/third_party/wiredtiger/src/lsm/lsm_tree.c
index 63f19858279..3c8f4d5750a 100644
--- a/src/third_party/wiredtiger/src/lsm/lsm_tree.c
+++ b/src/third_party/wiredtiger/src/lsm/lsm_tree.c
@@ -134,7 +134,7 @@ __wt_lsm_tree_close_all(WT_SESSION_IMPL *session)
* is no need to decrement the reference count since discard
* is unconditional.
*/
- (void)WT_ATOMIC_ADD4(lsm_tree->refcnt, 1);
+ (void)__wt_atomic_add32(&lsm_tree->refcnt, 1);
WT_TRET(__lsm_tree_close(session, lsm_tree));
WT_TRET(__lsm_tree_discard(session, lsm_tree, 1));
}
@@ -474,15 +474,17 @@ __lsm_tree_find(WT_SESSION_IMPL *session,
* Make sure we win the race to switch on the
* exclusive flag.
*/
- if (!WT_ATOMIC_CAS1(lsm_tree->exclusive, 0, 1))
+ if (!__wt_atomic_cas8(
+ &lsm_tree->exclusive, 0, 1))
return (EBUSY);
/* Make sure there are no readers */
- if (!WT_ATOMIC_CAS4(lsm_tree->refcnt, 0, 1)) {
+ if (!__wt_atomic_cas32(
+ &lsm_tree->refcnt, 0, 1)) {
lsm_tree->exclusive = 0;
return (EBUSY);
}
} else {
- (void)WT_ATOMIC_ADD4(lsm_tree->refcnt, 1);
+ (void)__wt_atomic_add32(&lsm_tree->refcnt, 1);
/*
* We got a reference, check if an exclusive
@@ -491,8 +493,8 @@ __lsm_tree_find(WT_SESSION_IMPL *session,
if (lsm_tree->exclusive) {
WT_ASSERT(session,
lsm_tree->refcnt > 0);
- (void)WT_ATOMIC_SUB4(
- lsm_tree->refcnt, 1);
+ (void)__wt_atomic_sub32(
+ &lsm_tree->refcnt, 1);
return (EBUSY);
}
}
@@ -553,7 +555,7 @@ __lsm_tree_open(WT_SESSION_IMPL *session,
WT_ASSERT(session, F_ISSET(session, WT_SESSION_HANDLE_LIST_LOCKED));
/* Start the LSM manager thread if it isn't running. */
- if (WT_ATOMIC_CAS4(conn->lsm_manager.lsm_workers, 0, 1))
+ if (__wt_atomic_cas32(&conn->lsm_manager.lsm_workers, 0, 1))
WT_RET(__wt_lsm_manager_start(session));
/* Make sure no one beat us to it. */
@@ -632,7 +634,7 @@ __wt_lsm_tree_release(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
WT_ASSERT(session, lsm_tree->refcnt > 0);
if (lsm_tree->exclusive)
lsm_tree->exclusive = 0;
- (void)WT_ATOMIC_SUB4(lsm_tree->refcnt, 1);
+ (void)__wt_atomic_sub32(&lsm_tree->refcnt, 1);
}
/* How aggressively to ramp up or down throttle due to level 0 merging */
@@ -827,7 +829,7 @@ __wt_lsm_tree_switch(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
/* Update the throttle time. */
__wt_lsm_tree_throttle(session, lsm_tree, 0);
- new_id = WT_ATOMIC_ADD4(lsm_tree->last, 1);
+ new_id = __wt_atomic_add32(&lsm_tree->last, 1);
WT_ERR(__wt_realloc_def(session, &lsm_tree->chunk_alloc,
nchunks + 1, &lsm_tree->chunk));
@@ -1085,7 +1087,7 @@ __wt_lsm_tree_truncate(
/* Create the new chunk. */
WT_ERR(__wt_calloc_one(session, &chunk));
- chunk->id = WT_ATOMIC_ADD4(lsm_tree->last, 1);
+ chunk->id = __wt_atomic_add32(&lsm_tree->last, 1);
WT_ERR(__wt_lsm_tree_setup_chunk(session, lsm_tree, chunk));
/* Mark all chunks old. */
@@ -1195,7 +1197,8 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip)
WT_LSM_TREE *lsm_tree;
time_t begin, end;
uint64_t progress;
- int i, compacting, flushing, locked, ref;
+ uint32_t i;
+ int compacting, flushing, locked, ref;
compacting = flushing = locked = ref = 0;
chunk = NULL;
@@ -1258,7 +1261,7 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip)
* If we have a chunk, we want to look for it to be on-disk.
* So we need to add a reference to keep it available.
*/
- (void)WT_ATOMIC_ADD4(chunk->refcnt, 1);
+ (void)__wt_atomic_add32(&chunk->refcnt, 1);
ref = 1;
}
@@ -1306,7 +1309,7 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip)
"Start compacting progress %" PRIu64,
name, chunk->id,
lsm_tree->merge_progressing));
- (void)WT_ATOMIC_SUB4(chunk->refcnt, 1);
+ (void)__wt_atomic_sub32(&chunk->refcnt, 1);
flushing = ref = 0;
compacting = 1;
F_SET(lsm_tree, WT_LSM_TREE_COMPACTING);
@@ -1360,7 +1363,7 @@ __wt_lsm_compact(WT_SESSION_IMPL *session, const char *name, int *skip)
err:
/* Ensure anything we set is cleared. */
if (ref)
- (void)WT_ATOMIC_SUB4(chunk->refcnt, 1);
+ (void)__wt_atomic_sub32(&chunk->refcnt, 1);
if (compacting) {
F_CLR(lsm_tree, WT_LSM_TREE_COMPACTING);
lsm_tree->merge_aggressiveness = 0;
diff --git a/src/third_party/wiredtiger/src/lsm/lsm_work_unit.c b/src/third_party/wiredtiger/src/lsm/lsm_work_unit.c
index 4f5e1516f1c..3e0fd43d404 100644
--- a/src/third_party/wiredtiger/src/lsm/lsm_work_unit.c
+++ b/src/third_party/wiredtiger/src/lsm/lsm_work_unit.c
@@ -53,7 +53,7 @@ __lsm_copy_chunks(WT_SESSION_IMPL *session,
* it's safe.
*/
for (i = 0; i < nchunks; i++)
- (void)WT_ATOMIC_ADD4(cookie->chunk_array[i]->refcnt, 1);
+ (void)__wt_atomic_add32(&cookie->chunk_array[i]->refcnt, 1);
err: WT_TRET(__wt_lsm_tree_readunlock(session, lsm_tree));
@@ -122,7 +122,7 @@ __wt_lsm_get_chunk_to_flush(WT_SESSION_IMPL *session,
force ? " w/ force" : "",
i, lsm_tree->nchunks, chunk->uri));
- (void)WT_ATOMIC_ADD4(chunk->refcnt, 1);
+ (void)__wt_atomic_add32(&chunk->refcnt, 1);
}
err: WT_RET(__wt_lsm_tree_readunlock(session, lsm_tree));
@@ -145,7 +145,7 @@ __lsm_unpin_chunks(WT_SESSION_IMPL *session, WT_LSM_WORKER_COOKIE *cookie)
if (cookie->chunk_array[i] == NULL)
continue;
WT_ASSERT(session, cookie->chunk_array[i]->refcnt > 0);
- (void)WT_ATOMIC_SUB4(cookie->chunk_array[i]->refcnt, 1);
+ (void)__wt_atomic_sub32(&cookie->chunk_array[i]->refcnt, 1);
}
/* Ensure subsequent calls don't double decrement. */
cookie->nchunks = 0;
@@ -219,7 +219,7 @@ __wt_lsm_work_bloom(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
* See if we win the race to switch on the "busy" flag and
* recheck that the chunk still needs a Bloom filter.
*/
- if (WT_ATOMIC_CAS4(chunk->bloom_busy, 0, 1)) {
+ if (__wt_atomic_cas32(&chunk->bloom_busy, 0, 1)) {
if (!F_ISSET(chunk, WT_LSM_CHUNK_BLOOM)) {
ret = __lsm_bloom_create(
session, lsm_tree, chunk, (u_int)i);
@@ -541,7 +541,7 @@ __wt_lsm_free_chunks(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
* Make sure only a single thread is freeing the old chunk array
* at any time.
*/
- if (!WT_ATOMIC_CAS4(lsm_tree->freeing_old_chunks, 0, 1))
+ if (!__wt_atomic_cas32(&lsm_tree->freeing_old_chunks, 0, 1))
return (0);
/*
* Take a copy of the current state of the LSM tree and look for chunks
diff --git a/src/third_party/wiredtiger/src/lsm/lsm_worker.c b/src/third_party/wiredtiger/src/lsm/lsm_worker.c
index d1272df763d..252523c5c57 100644
--- a/src/third_party/wiredtiger/src/lsm/lsm_worker.c
+++ b/src/third_party/wiredtiger/src/lsm/lsm_worker.c
@@ -65,7 +65,7 @@ __lsm_worker_general_op(
ret = __wt_lsm_checkpoint_chunk(
session, entry->lsm_tree, chunk);
WT_ASSERT(session, chunk->refcnt > 0);
- (void)WT_ATOMIC_SUB4(chunk->refcnt, 1);
+ (void)__wt_atomic_sub32(&chunk->refcnt, 1);
WT_ERR(ret);
}
} else if (entry->type == WT_LSM_WORK_DROP)
diff --git a/src/third_party/wiredtiger/src/os_posix/os_mtx_cond.c b/src/third_party/wiredtiger/src/os_posix/os_mtx_cond.c
index dfd72dd0cd2..baf9b475777 100644
--- a/src/third_party/wiredtiger/src/os_posix/os_mtx_cond.c
+++ b/src/third_party/wiredtiger/src/os_posix/os_mtx_cond.c
@@ -54,7 +54,7 @@ __wt_cond_wait(WT_SESSION_IMPL *session, WT_CONDVAR *cond, uint64_t usecs)
locked = 0;
/* Fast path if already signalled. */
- if (WT_ATOMIC_ADD4(cond->waiters, 1) == 0)
+ if (__wt_atomic_addi32(&cond->waiters, 1) == 0)
return (0);
/*
@@ -91,7 +91,7 @@ __wt_cond_wait(WT_SESSION_IMPL *session, WT_CONDVAR *cond, uint64_t usecs)
ret == ETIMEDOUT)
ret = 0;
- (void)WT_ATOMIC_SUB4(cond->waiters, 1);
+ (void)__wt_atomic_subi32(&cond->waiters, 1);
err: if (locked)
WT_TRET(pthread_mutex_unlock(&cond->mtx));
@@ -124,7 +124,7 @@ __wt_cond_signal(WT_SESSION_IMPL *session, WT_CONDVAR *cond)
if (cond->waiters == -1)
return (0);
- if (cond->waiters > 0 || !WT_ATOMIC_CAS4(cond->waiters, 0, -1)) {
+ if (cond->waiters > 0 || !__wt_atomic_casi32(&cond->waiters, 0, -1)) {
WT_ERR(pthread_mutex_lock(&cond->mtx));
locked = 1;
WT_ERR(pthread_cond_broadcast(&cond->cond));
diff --git a/src/third_party/wiredtiger/src/os_posix/os_mtx_rw.c b/src/third_party/wiredtiger/src/os_posix/os_mtx_rw.c
index c3ae43b605f..3927d618ede 100644
--- a/src/third_party/wiredtiger/src/os_posix/os_mtx_rw.c
+++ b/src/third_party/wiredtiger/src/os_posix/os_mtx_rw.c
@@ -82,7 +82,7 @@ __wt_try_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
writers = l->s.writers;
old = (pad << 48) + (users << 32) + (users << 16) + writers;
new = (pad << 48) + ((users + 1) << 32) + ((users + 1) << 16) + writers;
- return (WT_ATOMIC_CAS_VAL8(l->u, old, new) == old ? 0 : EBUSY);
+ return (__wt_atomic_cas64(&l->u, old, new) ? 0 : EBUSY);
}
/*
@@ -102,7 +102,7 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
WT_STAT_FAST_CONN_INCR(session, rwlock_read);
l = &rwlock->rwlock;
- me = WT_ATOMIC_FETCH_ADD8(l->u, (uint64_t)1 << 32);
+ me = __wt_atomic_fetch_add64(&l->u, (uint64_t)1 << 32);
val = (uint16_t)(me >> 32);
for (pause_cnt = 0; val != l->s.readers;) {
/*
@@ -138,7 +138,7 @@ __wt_readunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
session, WT_VERB_MUTEX, "rwlock: read unlock %s", rwlock->name));
l = &rwlock->rwlock;
- WT_ATOMIC_ADD2(l->s.writers, 1);
+ (void)__wt_atomic_add16(&l->s.writers, 1);
return (0);
}
@@ -163,7 +163,7 @@ __wt_try_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
users = l->s.users;
old = (pad << 48) + (users << 32) + (readers << 16) + users;
new = (pad << 48) + ((users + 1) << 32) + (readers << 16) + users;
- return (WT_ATOMIC_CAS_VAL8(l->u, old, new) == old ? 0 : EBUSY);
+ return (__wt_atomic_cas64(&l->u, old, new) ? 0 : EBUSY);
}
/*
@@ -187,7 +187,7 @@ __wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* the write lock.
*/
l = &rwlock->rwlock;
- me = WT_ATOMIC_FETCH_ADD8(l->u, (uint64_t)1 << 32);
+ me = __wt_atomic_fetch_add64(&l->u, (uint64_t)1 << 32);
val = (uint16_t)(me >> 32);
while (val != l->s.writers)
WT_PAUSE();
diff --git a/src/third_party/wiredtiger/src/os_posix/os_open.c b/src/third_party/wiredtiger/src/os_posix/os_open.c
index b3dc8f1db27..1453f1a666d 100644
--- a/src/third_party/wiredtiger/src/os_posix/os_open.c
+++ b/src/third_party/wiredtiger/src/os_posix/os_open.c
@@ -177,7 +177,7 @@ setupfh:
}
if (!matched) {
WT_CONN_FILE_INSERT(conn, fh, bucket);
- (void)WT_ATOMIC_ADD4(conn->open_file_count, 1);
+ (void)__wt_atomic_add32(&conn->open_file_count, 1);
*fhp = fh;
}
@@ -223,7 +223,7 @@ __wt_close(WT_SESSION_IMPL *session, WT_FH **fhp)
/* Remove from the list. */
bucket = fh->name_hash % WT_HASH_ARRAY_SIZE;
WT_CONN_FILE_REMOVE(conn, fh, bucket);
- (void)WT_ATOMIC_SUB4(conn->open_file_count, 1);
+ (void)__wt_atomic_sub32(&conn->open_file_count, 1);
__wt_spin_unlock(session, &conn->fh_lock);
diff --git a/src/third_party/wiredtiger/src/os_win/os_mtx_cond.c b/src/third_party/wiredtiger/src/os_win/os_mtx_cond.c
index 51f6d6533c8..565928cb863 100644
--- a/src/third_party/wiredtiger/src/os_win/os_mtx_cond.c
+++ b/src/third_party/wiredtiger/src/os_win/os_mtx_cond.c
@@ -51,7 +51,7 @@ __wt_cond_wait(WT_SESSION_IMPL *session, WT_CONDVAR *cond, uint64_t usecs)
locked = 0;
/* Fast path if already signalled. */
- if (WT_ATOMIC_ADD4(cond->waiters, 1) == 0)
+ if (__wt_atomic_addi32(&cond->waiters, 1) == 0)
return (0);
/*
@@ -97,7 +97,7 @@ __wt_cond_wait(WT_SESSION_IMPL *session, WT_CONDVAR *cond, uint64_t usecs)
}
}
- (void)WT_ATOMIC_SUB4(cond->waiters, 1);
+ (void)__wt_atomic_subi32(&cond->waiters, 1);
if (locked)
LeaveCriticalSection(&cond->mtx);
@@ -130,7 +130,7 @@ __wt_cond_signal(WT_SESSION_IMPL *session, WT_CONDVAR *cond)
if (cond->waiters == -1)
return (0);
- if (cond->waiters > 0 || !WT_ATOMIC_CAS4(cond->waiters, 0, -1)) {
+ if (cond->waiters > 0 || !__wt_atomic_casi32(&cond->waiters, 0, -1)) {
EnterCriticalSection(&cond->mtx);
locked = 1;
WakeAllConditionVariable(&cond->cond);
diff --git a/src/third_party/wiredtiger/src/os_win/os_open.c b/src/third_party/wiredtiger/src/os_win/os_open.c
index 1c6f5636501..3bd24369242 100644
--- a/src/third_party/wiredtiger/src/os_win/os_open.c
+++ b/src/third_party/wiredtiger/src/os_win/os_open.c
@@ -169,7 +169,7 @@ setupfh:
}
if (!matched) {
WT_CONN_FILE_INSERT(conn, fh, bucket);
- (void)WT_ATOMIC_ADD4(conn->open_file_count, 1);
+ (void)__wt_atomic_add32(&conn->open_file_count, 1);
*fhp = fh;
}
@@ -217,7 +217,7 @@ __wt_close(WT_SESSION_IMPL *session, WT_FH **fhp)
/* Remove from the list. */
bucket = fh->name_hash % WT_HASH_ARRAY_SIZE;
WT_CONN_FILE_REMOVE(conn, fh, bucket);
- (void)WT_ATOMIC_SUB4(conn->open_file_count, 1);
+ (void)__wt_atomic_sub32(&conn->open_file_count, 1);
__wt_spin_unlock(session, &conn->fh_lock);
diff --git a/src/third_party/wiredtiger/src/reconcile/rec_write.c b/src/third_party/wiredtiger/src/reconcile/rec_write.c
index e11490ac7fc..eaaa3a56be3 100644
--- a/src/third_party/wiredtiger/src/reconcile/rec_write.c
+++ b/src/third_party/wiredtiger/src/reconcile/rec_write.c
@@ -277,7 +277,7 @@ typedef struct {
WT_SALVAGE_COOKIE *salvage; /* If it's a salvage operation */
- int tested_ref_state; /* Debugging information */
+ uint32_t tested_ref_state; /* Debugging information */
} WT_RECONCILE;
static void __rec_bnd_cleanup(WT_SESSION_IMPL *, WT_RECONCILE *, int);
@@ -1100,8 +1100,8 @@ __rec_child_modify(WT_SESSION_IMPL *session,
* to see if the delete is visible to us. Lock down the
* structure.
*/
- if (!WT_ATOMIC_CAS4(
- ref->state, WT_REF_DELETED, WT_REF_LOCKED))
+ if (!__wt_atomic_casv32(
+ &ref->state, WT_REF_DELETED, WT_REF_LOCKED))
break;
ret = __rec_child_deleted(session, r, ref, statep);
WT_PUBLISH(ref->state, WT_REF_DELETED);
@@ -5069,7 +5069,7 @@ err: __wt_scr_free(session, &tkey);
TXNID_LT(btree->rec_max_txn, r->max_txn))
btree->rec_max_txn = r->max_txn;
- if (WT_ATOMIC_CAS4(mod->write_gen, r->orig_write_gen, 0))
+ if (__wt_atomic_cas32(&mod->write_gen, r->orig_write_gen, 0))
__wt_cache_dirty_decr(session, page);
}
diff --git a/src/third_party/wiredtiger/src/session/session_api.c b/src/third_party/wiredtiger/src/session/session_api.c
index 881be7428e5..af30edb9251 100644
--- a/src/third_party/wiredtiger/src/session/session_api.c
+++ b/src/third_party/wiredtiger/src/session/session_api.c
@@ -439,6 +439,41 @@ err: API_END_RET_NOTFOUND_MAP(session, ret);
}
/*
+ * __session_log_flush --
+ * WT_SESSION->log_flush method.
+ */
+static int
+__session_log_flush(WT_SESSION *wt_session, const char *config)
+{
+ WT_CONFIG_ITEM cval;
+ WT_CONNECTION_IMPL *conn;
+ WT_DECL_RET;
+ WT_SESSION_IMPL *session;
+ uint32_t flags;
+
+ session = (WT_SESSION_IMPL *)wt_session;
+ SESSION_API_CALL(session, log_flush, config, cfg);
+ WT_STAT_FAST_CONN_INCR(session, log_flush);
+
+ conn = S2C(session);
+ flags = 0;
+ /*
+ * If logging is not enabled there is nothing to do.
+ */
+ if (!FLD_ISSET(conn->log_flags, WT_CONN_LOG_ENABLED))
+ WT_ERR_MSG(session, EINVAL, "logging not enabled");
+
+ WT_ERR(__wt_config_gets_def(session, cfg, "sync", 0, &cval));
+ if (WT_STRING_MATCH("on", cval.str, cval.len))
+ flags = WT_LOG_FSYNC;
+ else if (WT_STRING_MATCH("off", cval.str, cval.len))
+ flags = WT_LOG_FLUSH;
+ ret = __wt_log_flush(session, flags);
+
+err: API_END_RET(session, ret);
+}
+
+/*
* __session_log_printf --
* WT_SESSION->log_printf method.
*/
@@ -1008,6 +1043,7 @@ __wt_open_session(WT_CONNECTION_IMPL *conn,
__session_create,
__session_compact,
__session_drop,
+ __session_log_flush,
__session_log_printf,
__session_rename,
__session_salvage,
diff --git a/src/third_party/wiredtiger/src/session/session_dhandle.c b/src/third_party/wiredtiger/src/session/session_dhandle.c
index 7488d9115f0..a087e7c896d 100644
--- a/src/third_party/wiredtiger/src/session/session_dhandle.c
+++ b/src/third_party/wiredtiger/src/session/session_dhandle.c
@@ -31,7 +31,7 @@ __session_add_dhandle(
if (dhandle_cachep != NULL)
*dhandle_cachep = dhandle_cache;
- (void)WT_ATOMIC_ADD4(session->dhandle->session_ref, 1);
+ (void)__wt_atomic_add32(&session->dhandle->session_ref, 1);
/* Sweep the handle list to remove any dead handles. */
return (__session_dhandle_sweep(session));
@@ -51,7 +51,7 @@ __session_discard_dhandle(
TAILQ_REMOVE(&session->dhandles, dhandle_cache, q);
TAILQ_REMOVE(&session->dhhash[bucket], dhandle_cache, hashq);
- (void)WT_ATOMIC_SUB4(dhandle_cache->dhandle->session_ref, 1);
+ (void)__wt_atomic_sub32(&dhandle_cache->dhandle->session_ref, 1);
__wt_overwrite_and_free(session, dhandle_cache);
}
@@ -362,7 +362,7 @@ __session_find_shared_dhandle(WT_SESSION_IMPL *session,
const char *uri, const char *checkpoint, uint32_t flags)
{
WT_RET(__wt_conn_dhandle_find(session, uri, checkpoint, flags));
- (void)WT_ATOMIC_ADD4(session->dhandle->session_ref, 1);
+ (void)__wt_atomic_add32(&session->dhandle->session_ref, 1);
return (0);
}
@@ -392,7 +392,8 @@ __wt_session_get_btree(WT_SESSION_IMPL *session,
* shared handle list and cache any handle we find.
*/
WT_WITH_DHANDLE_LOCK(session, ret =
- __session_find_shared_dhandle(session, uri, checkpoint, flags));
+ __session_find_shared_dhandle(
+ session, uri, checkpoint, flags));
dhandle = (ret == 0) ? session->dhandle : NULL;
WT_RET_NOTFOUND_OK(ret);
}
diff --git a/src/third_party/wiredtiger/src/support/rand.c b/src/third_party/wiredtiger/src/support/rand.c
index 4d0f90b87dc..12646aa3f06 100644
--- a/src/third_party/wiredtiger/src/support/rand.c
+++ b/src/third_party/wiredtiger/src/support/rand.c
@@ -83,8 +83,11 @@ __wt_random(WT_RAND_STATE volatile * rnd_state)
* to initialize the state, or initializes with a seed that results in a
* short period.
*/
- if (z == 0 || w == 0)
- __wt_random_init(rnd_state);
+ if (z == 0 || w == 0) {
+ __wt_random_init(&rnd);
+ w = M_W(rnd);
+ z = M_Z(rnd);
+ }
M_Z(rnd) = z = 36969 * (z & 65535) + (z >> 16);
M_W(rnd) = w = 18000 * (w & 65535) + (w >> 16);
diff --git a/src/third_party/wiredtiger/src/support/stat.c b/src/third_party/wiredtiger/src/support/stat.c
index 476566b227f..a95cfcdafbb 100644
--- a/src/third_party/wiredtiger/src/support/stat.c
+++ b/src/third_party/wiredtiger/src/support/stat.c
@@ -455,6 +455,7 @@ __wt_stat_init_connection_stats(WT_CONNECTION_STATS *stats)
"log: failed to find a slot large enough for record";
stats->log_bytes_payload.desc = "log: log bytes of payload data";
stats->log_bytes_written.desc = "log: log bytes written";
+ stats->log_flush.desc = "log: log flush operations";
stats->log_reads.desc = "log: log read operations";
stats->log_compress_writes.desc = "log: log records compressed";
stats->log_compress_write_fails.desc =
@@ -630,6 +631,7 @@ __wt_stat_refresh_connection_stats(void *stats_arg)
stats->log_slot_toosmall.v = 0;
stats->log_bytes_payload.v = 0;
stats->log_bytes_written.v = 0;
+ stats->log_flush.v = 0;
stats->log_reads.v = 0;
stats->log_compress_writes.v = 0;
stats->log_compress_write_fails.v = 0;
diff --git a/src/third_party/wiredtiger/src/txn/txn.c b/src/third_party/wiredtiger/src/txn/txn.c
index a391ec8be88..45be0a15a32 100644
--- a/src/third_party/wiredtiger/src/txn/txn.c
+++ b/src/third_party/wiredtiger/src/txn/txn.c
@@ -96,7 +96,7 @@ __wt_txn_get_snapshot(WT_SESSION_IMPL *session)
if ((count = txn_global->scan_count) < 0)
WT_PAUSE();
} while (count < 0 ||
- !WT_ATOMIC_CAS4(txn_global->scan_count, count, count + 1));
+ !__wt_atomic_casiv32(&txn_global->scan_count, count, count + 1));
current_id = snap_min = txn_global->current;
prev_oldest_id = txn_global->oldest_id;
@@ -109,7 +109,7 @@ __wt_txn_get_snapshot(WT_SESSION_IMPL *session)
/* Check that the oldest ID has not moved in the meantime. */
if (prev_oldest_id == txn_global->oldest_id) {
WT_ASSERT(session, txn_global->scan_count > 0);
- (void)WT_ATOMIC_SUB4(txn_global->scan_count, 1);
+ (void)__wt_atomic_subiv32(&txn_global->scan_count, 1);
return;
}
}
@@ -149,7 +149,7 @@ __wt_txn_get_snapshot(WT_SESSION_IMPL *session)
txn_global->last_running = snap_min;
WT_ASSERT(session, txn_global->scan_count > 0);
- (void)WT_ATOMIC_SUB4(txn_global->scan_count, 1);
+ (void)__wt_atomic_subiv32(&txn_global->scan_count, 1);
__txn_sort_snapshot(session, n, current_id);
}
@@ -203,7 +203,7 @@ __wt_txn_update_oldest(WT_SESSION_IMPL *session, int force)
if ((count = txn_global->scan_count) < 0)
WT_PAUSE();
} while (count < 0 ||
- !WT_ATOMIC_CAS4(txn_global->scan_count, count, count + 1));
+ !__wt_atomic_casiv32(&txn_global->scan_count, count, count + 1));
/* The oldest ID cannot change until the scan count goes to zero. */
prev_oldest_id = txn_global->oldest_id;
@@ -251,7 +251,7 @@ __wt_txn_update_oldest(WT_SESSION_IMPL *session, int force)
/* Update the oldest ID. */
if (TXNID_LT(prev_oldest_id, oldest_id) &&
- WT_ATOMIC_CAS4(txn_global->scan_count, 1, -1)) {
+ __wt_atomic_casiv32(&txn_global->scan_count, 1, -1)) {
WT_ORDERED_READ(session_cnt, conn->session_cnt);
for (i = 0, s = txn_global->states; i < session_cnt; i++, s++) {
if ((id = s->id) != WT_TXN_NONE &&
@@ -277,7 +277,7 @@ __wt_txn_update_oldest(WT_SESSION_IMPL *session, int force)
oldest_session->txn.snap_min);
}
WT_ASSERT(session, txn_global->scan_count > 0);
- (void)WT_ATOMIC_SUB4(txn_global->scan_count, 1);
+ (void)__wt_atomic_subiv32(&txn_global->scan_count, 1);
}
}