summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--  bench/wtperf/wtperf.c       8
-rw-r--r--  src/btree/bt_read.c         6
-rw-r--r--  src/conn/conn_cache_pool.c  2
-rw-r--r--  src/conn/conn_ckpt.c        2
-rw-r--r--  src/conn/conn_stat.c        2
-rw-r--r--  src/evict/evict_lru.c       10
-rw-r--r--  src/include/misc.h          1
-rw-r--r--  src/include/mutex.i         4
-rw-r--r--  src/include/os.h            4
-rw-r--r--  src/log/log.c               2
-rw-r--r--  src/log/log_slot.c          2
-rw-r--r--  src/lsm/lsm_cursor.c        2
-rw-r--r--  src/lsm/lsm_merge.c         2
-rw-r--r--  src/lsm/lsm_tree.c          8
-rw-r--r--  src/os_posix/os_mtx_cond.c  4
-rw-r--r--  src/os_posix/os_mtx_rw.c    4
-rw-r--r--  src/os_posix/os_sleep.c     4
-rw-r--r--  src/os_posix/os_time.c      2
-rw-r--r--  src/os_win/os_sleep.c       6
-rw-r--r--  src/reconcile/rec_write.c   2
-rw-r--r--  src/support/err.c           3
21 files changed, 42 insertions(+), 38 deletions(-)
diff --git a/bench/wtperf/wtperf.c b/bench/wtperf/wtperf.c
index 488d2ff4edf..9ac96862fa1 100644
--- a/bench/wtperf/wtperf.c
+++ b/bench/wtperf/wtperf.c
@@ -653,7 +653,7 @@ op_err: if (ret == WT_ROLLBACK && ops_per_txn != 0) {
goto err;
}
++trk->latency_ops;
- usecs = ns_to_us(WT_TIMEDIFF_NS(stop, start));
+ usecs = WT_TIMEDIFF_US(stop, start);
track_operation(trk, usecs);
}
/* Increment operation count */
@@ -936,7 +936,7 @@ populate_thread(void *arg)
goto err;
}
++trk->latency_ops;
- usecs = ns_to_us(WT_TIMEDIFF_NS(stop, start));
+ usecs = WT_TIMEDIFF_US(stop, start);
track_operation(trk, usecs);
}
++thread->insert.ops; /* Same as trk->ops */
@@ -1068,7 +1068,7 @@ populate_async(void *arg)
goto err;
}
++trk->latency_ops;
- usecs = ns_to_us(WT_TIMEDIFF_NS(stop, start));
+ usecs = WT_TIMEDIFF_US(stop, start);
track_operation(trk, usecs);
}
if ((ret = session->close(session, NULL)) != 0) {
@@ -2423,7 +2423,7 @@ worker_throttle(int64_t throttle, int64_t *ops, struct timespec *interval)
* If we did enough operations in less than a second, sleep for
* the rest of the second.
*/
- usecs_to_complete = ns_to_us(WT_TIMEDIFF_NS(now, *interval));
+ usecs_to_complete = WT_TIMEDIFF_US(now, *interval);
if (usecs_to_complete < USEC_PER_SEC)
(void)usleep((useconds_t)(USEC_PER_SEC - usecs_to_complete));
diff --git a/src/btree/bt_read.c b/src/btree/bt_read.c
index e60f7b3fb02..389ac761c5b 100644
--- a/src/btree/bt_read.c
+++ b/src/btree/bt_read.c
@@ -586,8 +586,8 @@ skip_evict:
* CPU to no purpose.
*/
if (stalled)
- wait_cnt += 1000;
- else if (++wait_cnt < 1000) {
+ wait_cnt += WT_THOUSAND;
+ else if (++wait_cnt < WT_THOUSAND) {
__wt_yield();
continue;
}
@@ -603,7 +603,7 @@ skip_evict:
if (cache_work)
continue;
}
- sleep_cnt = WT_MIN(sleep_cnt + 1000, 10000);
+ sleep_cnt = WT_MIN(sleep_cnt + WT_THOUSAND, 10000);
WT_STAT_FAST_CONN_INCRV(session, page_sleep, sleep_cnt);
__wt_sleep(0, sleep_cnt);
}
diff --git a/src/conn/conn_cache_pool.c b/src/conn/conn_cache_pool.c
index aa14e9aadde..8d16f94c092 100644
--- a/src/conn/conn_cache_pool.c
+++ b/src/conn/conn_cache_pool.c
@@ -734,7 +734,7 @@ __wt_cache_pool_server(void *arg)
F_ISSET(cache, WT_CACHE_POOL_RUN)) {
if (cp->currently_used <= cp->size)
WT_ERR(__wt_cond_wait(session,
- cp->cache_pool_cond, 1000000));
+ cp->cache_pool_cond, WT_MILLION));
/*
* Re-check pool run flag - since we want to avoid getting the
diff --git a/src/conn/conn_ckpt.c b/src/conn/conn_ckpt.c
index 8f039e61654..b47e2550b23 100644
--- a/src/conn/conn_ckpt.c
+++ b/src/conn/conn_ckpt.c
@@ -31,7 +31,7 @@ __ckpt_server_config(WT_SESSION_IMPL *session, const char **cfg, bool *startp)
* Checkpoints based on log size also require logging be enabled.
*/
WT_RET(__wt_config_gets(session, cfg, "checkpoint.wait", &cval));
- conn->ckpt_usecs = (uint64_t)cval.val * 1000000;
+ conn->ckpt_usecs = (uint64_t)cval.val * WT_MILLION;
WT_RET(__wt_config_gets(session, cfg, "checkpoint.log_size", &cval));
conn->ckpt_logsize = (wt_off_t)cval.val;
diff --git a/src/conn/conn_stat.c b/src/conn/conn_stat.c
index ec3a630581a..455ec9514f0 100644
--- a/src/conn/conn_stat.c
+++ b/src/conn/conn_stat.c
@@ -83,7 +83,7 @@ __statlog_config(WT_SESSION_IMPL *session, const char **cfg, bool *runp)
WT_RET(__wt_config_gets(session, cfg, "statistics_log.wait", &cval));
/* Only start the server if wait time is non-zero */
*runp = cval.val != 0;
- conn->stat_usecs = (uint64_t)cval.val * 1000000;
+ conn->stat_usecs = (uint64_t)cval.val * WT_MILLION;
WT_RET(__wt_config_gets(
session, cfg, "statistics_log.on_close", &cval));
diff --git a/src/evict/evict_lru.c b/src/evict/evict_lru.c
index 5b9851852c7..fa6c4f4313f 100644
--- a/src/evict/evict_lru.c
+++ b/src/evict/evict_lru.c
@@ -183,10 +183,10 @@ __evict_server(void *arg)
session, &conn->dhandle_lock)) == EBUSY &&
!F_ISSET(cache, WT_CACHE_CLEAR_WALKS);
spins++) {
- if (spins < 1000)
+ if (spins < WT_THOUSAND)
__wt_yield();
else
- __wt_sleep(0, 1000);
+ __wt_sleep(0, WT_THOUSAND);
}
/*
* If we gave up acquiring the lock, that indicates a
@@ -601,7 +601,7 @@ __evict_pass(WT_SESSION_IMPL *session)
* that can free space in cache, such as LSM discarding
* handles.
*/
- __wt_sleep(0, 1000 * (uint64_t)loop);
+ __wt_sleep(0, WT_THOUSAND * (uint64_t)loop);
if (loop == 100) {
/*
* Mark the cache as stuck if we need space
@@ -992,10 +992,10 @@ retry: while (slot < max_entries && ret == 0) {
session, &conn->dhandle_lock)) == EBUSY &&
!F_ISSET(cache, WT_CACHE_CLEAR_WALKS);
spins++) {
- if (spins < 1000)
+ if (spins < WT_THOUSAND)
__wt_yield();
else
- __wt_sleep(0, 1000);
+ __wt_sleep(0, WT_THOUSAND);
}
if (ret != 0)
break;
diff --git a/src/include/misc.h b/src/include/misc.h
index eca77214b47..e542baec642 100644
--- a/src/include/misc.h
+++ b/src/include/misc.h
@@ -13,6 +13,7 @@
#define WT_UNUSED(var) (void)(var)
/* Basic constants. */
+#define WT_THOUSAND (1000)
#define WT_MILLION (1000000)
#define WT_BILLION (1000000000)
diff --git a/src/include/mutex.i b/src/include/mutex.i
index 843c4ad9350..7eb042dd79f 100644
--- a/src/include/mutex.i
+++ b/src/include/mutex.i
@@ -18,7 +18,7 @@
/* Default to spinning 1000 times before yielding. */
#ifndef WT_SPIN_COUNT
-#define WT_SPIN_COUNT 1000
+#define WT_SPIN_COUNT WT_THOUSAND
#endif
/*
@@ -300,7 +300,7 @@ __wt_fair_lock(WT_SESSION_IMPL *session, WT_FAIR_LOCK *lock)
* situation happens if there are more threads than cores in the
* system and we're thrashing on shared resources.
*/
- if (++pause_cnt < 1000)
+ if (++pause_cnt < WT_THOUSAND)
WT_PAUSE();
else
__wt_sleep(0, 10);
diff --git a/src/include/os.h b/src/include/os.h
index 78984d39d94..d135fd9eb1f 100644
--- a/src/include/os.h
+++ b/src/include/os.h
@@ -66,8 +66,10 @@ typedef enum {
} while (0)
#define WT_TIMEDIFF_NS(end, begin) \
- (1000000000 * (uint64_t)((end).tv_sec - (begin).tv_sec) + \
+ (WT_BILLION * (uint64_t)((end).tv_sec - (begin).tv_sec) + \
(uint64_t)(end).tv_nsec - (uint64_t)(begin).tv_nsec)
+#define WT_TIMEDIFF_US(end, begin) \
+ (WT_TIMEDIFF_NS((end), (begin)) / WT_THOUSAND)
#define WT_TIMEDIFF_MS(end, begin) \
(WT_TIMEDIFF_NS((end), (begin)) / WT_MILLION)
#define WT_TIMEDIFF_SEC(end, begin) \
diff --git a/src/log/log.c b/src/log/log.c
index 44dc7dc30a7..3106094e7e3 100644
--- a/src/log/log.c
+++ b/src/log/log.c
@@ -1313,7 +1313,7 @@ __wt_log_release(WT_SESSION_IMPL *session, WT_LOGSLOT *slot, bool *freep)
if (F_ISSET(session, WT_SESSION_LOCKED_SLOT))
__wt_spin_unlock(session, &log->log_slot_lock);
WT_ERR(__wt_cond_signal(session, conn->log_wrlsn_cond));
- if (++yield_count < 1000)
+ if (++yield_count < WT_THOUSAND)
__wt_yield();
else
ret = __wt_cond_wait(session, log->log_write_cond, 200);
diff --git a/src/log/log_slot.c b/src/log/log_slot.c
index b3790412536..255551f99a4 100644
--- a/src/log/log_slot.c
+++ b/src/log/log_slot.c
@@ -380,7 +380,7 @@ __wt_log_slot_join(WT_SESSION_IMPL *session, uint64_t mysize,
* There should almost always be a slot open.
*/
#ifdef HAVE_DIAGNOSTIC
- unbuf_force = (++log->write_calls % 1000) == 0;
+ unbuf_force = (++log->write_calls % WT_THOUSAND) == 0;
#endif
for (;;) {
WT_BARRIER();
diff --git a/src/lsm/lsm_cursor.c b/src/lsm/lsm_cursor.c
index dbd6a105475..d4132472594 100644
--- a/src/lsm/lsm_cursor.c
+++ b/src/lsm/lsm_cursor.c
@@ -81,7 +81,7 @@ __wt_clsm_await_switch(WT_CURSOR_LSM *clsm)
lsm_tree->nchunks == 0 ||
clsm->dsk_gen == lsm_tree->dsk_gen;
++waited) {
- if (waited % 1000 == 0)
+ if (waited % WT_THOUSAND == 0)
WT_RET(__wt_lsm_manager_push_entry(
session, WT_LSM_WORK_SWITCH, 0, lsm_tree));
__wt_sleep(0, 10);
diff --git a/src/lsm/lsm_merge.c b/src/lsm/lsm_merge.c
index 9a4f329daa9..1a2608803e4 100644
--- a/src/lsm/lsm_merge.c
+++ b/src/lsm/lsm_merge.c
@@ -457,7 +457,7 @@ __wt_lsm_merge(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, u_int id)
cfg[2] = NULL;
WT_ERR(__wt_open_cursor(session, chunk->uri, NULL, cfg, &dest));
-#define LSM_MERGE_CHECK_INTERVAL 1000
+#define LSM_MERGE_CHECK_INTERVAL WT_THOUSAND
for (insert_count = 0; (ret = src->next(src)) == 0; insert_count++) {
if (insert_count % LSM_MERGE_CHECK_INTERVAL == 0) {
if (!F_ISSET(lsm_tree, WT_LSM_TREE_ACTIVE))
diff --git a/src/lsm/lsm_tree.c b/src/lsm/lsm_tree.c
index 842e0bdd7c2..c9d8b23f7ba 100644
--- a/src/lsm/lsm_tree.c
+++ b/src/lsm/lsm_tree.c
@@ -111,7 +111,7 @@ __lsm_tree_close(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
* other schema level operations will return EBUSY, even though
* we're dropping the schema lock here.
*/
- if (i % 1000 == 0) {
+ if (i % WT_THOUSAND == 0) {
WT_WITHOUT_LOCKS(session, ret =
__wt_lsm_manager_clear_tree(session, lsm_tree));
WT_RET(ret);
@@ -783,8 +783,8 @@ __wt_lsm_tree_throttle(
}
/* Put an upper bound of 1s on both throttle calculations. */
- lsm_tree->ckpt_throttle = WT_MIN(1000000, lsm_tree->ckpt_throttle);
- lsm_tree->merge_throttle = WT_MIN(1000000, lsm_tree->merge_throttle);
+ lsm_tree->ckpt_throttle = WT_MIN(WT_MILLION, lsm_tree->ckpt_throttle);
+ lsm_tree->merge_throttle = WT_MIN(WT_MILLION, lsm_tree->merge_throttle);
/*
* Update our estimate of how long each in-memory chunk stays active.
@@ -807,7 +807,7 @@ __wt_lsm_tree_throttle(
if (timediff < 10 * oldtime)
lsm_tree->chunk_fill_ms =
(3 * lsm_tree->chunk_fill_ms +
- timediff / 1000000) / 4;
+ timediff / WT_MILLION) / 4;
}
}
diff --git a/src/os_posix/os_mtx_cond.c b/src/os_posix/os_mtx_cond.c
index fac2c06957d..d5fc86b648b 100644
--- a/src/os_posix/os_mtx_cond.c
+++ b/src/os_posix/os_mtx_cond.c
@@ -76,9 +76,9 @@ __wt_cond_wait_signal(
if (usecs > 0) {
WT_ERR(__wt_epoch(session, &ts));
ts.tv_sec += (time_t)
- (((uint64_t)ts.tv_nsec + 1000 * usecs) / WT_BILLION);
+ (((uint64_t)ts.tv_nsec + WT_THOUSAND * usecs) / WT_BILLION);
ts.tv_nsec = (long)
- (((uint64_t)ts.tv_nsec + 1000 * usecs) % WT_BILLION);
+ (((uint64_t)ts.tv_nsec + WT_THOUSAND * usecs) % WT_BILLION);
ret = pthread_cond_timedwait(&cond->cond, &cond->mtx, &ts);
} else
ret = pthread_cond_wait(&cond->cond, &cond->mtx);
diff --git a/src/os_posix/os_mtx_rw.c b/src/os_posix/os_mtx_rw.c
index d47ab197643..46f134feabb 100644
--- a/src/os_posix/os_mtx_rw.c
+++ b/src/os_posix/os_mtx_rw.c
@@ -201,7 +201,7 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* Don't sleep long when waiting on a read lock, hopefully we're
* waiting on another read thread to increment the reader count.
*/
- if (++pause_cnt < 1000)
+ if (++pause_cnt < WT_THOUSAND)
WT_PAUSE();
else
__wt_sleep(0, 10);
@@ -300,7 +300,7 @@ __wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* situation happens if there are more threads than cores in the
* system and we're thrashing on shared resources.
*/
- if (++pause_cnt < 1000)
+ if (++pause_cnt < WT_THOUSAND)
WT_PAUSE();
else
__wt_sleep(0, 10);
diff --git a/src/os_posix/os_sleep.c b/src/os_posix/os_sleep.c
index f888e51bf7f..4e90edabc53 100644
--- a/src/os_posix/os_sleep.c
+++ b/src/os_posix/os_sleep.c
@@ -17,8 +17,8 @@ __wt_sleep(uint64_t seconds, uint64_t micro_seconds)
{
struct timeval t;
- t.tv_sec = (time_t)(seconds + micro_seconds / 1000000);
- t.tv_usec = (suseconds_t)(micro_seconds % 1000000);
+ t.tv_sec = (time_t)(seconds + micro_seconds / WT_MILLION);
+ t.tv_usec = (suseconds_t)(micro_seconds % WT_MILLION);
(void)select(0, NULL, NULL, NULL, &t);
}
diff --git a/src/os_posix/os_time.c b/src/os_posix/os_time.c
index 6bddf4a18dd..c3052df62e7 100644
--- a/src/os_posix/os_time.c
+++ b/src/os_posix/os_time.c
@@ -28,7 +28,7 @@ __wt_epoch(WT_SESSION_IMPL *session, struct timespec *tsp)
WT_SYSCALL_RETRY(gettimeofday(&v, NULL), ret);
if (ret == 0) {
tsp->tv_sec = v.tv_sec;
- tsp->tv_nsec = v.tv_usec * 1000;
+ tsp->tv_nsec = v.tv_usec * WT_THOUSAND;
return (0);
}
WT_RET_MSG(session, ret, "gettimeofday");
diff --git a/src/os_win/os_sleep.c b/src/os_win/os_sleep.c
index 484cf218f26..33e04c1d8a9 100644
--- a/src/os_win/os_sleep.c
+++ b/src/os_win/os_sleep.c
@@ -19,7 +19,7 @@ __wt_sleep(uint64_t seconds, uint64_t micro_seconds)
* If the caller wants a small pause, set to our
* smallest granularity.
*/
- if (seconds == 0 && micro_seconds < 1000)
- micro_seconds = 1000;
- Sleep(seconds * 1000 + micro_seconds / 1000);
+ if (seconds == 0 && micro_seconds < WT_THOUSAND)
+ micro_seconds = WT_THOUSAND;
+ Sleep(seconds * WT_THOUSAND + micro_seconds / WT_THOUSAND);
}
diff --git a/src/reconcile/rec_write.c b/src/reconcile/rec_write.c
index 4479f4a8515..fe60cc16063 100644
--- a/src/reconcile/rec_write.c
+++ b/src/reconcile/rec_write.c
@@ -960,7 +960,7 @@ __rec_bnd_cleanup(WT_SESSION_IMPL *session, WT_RECONCILE *r, bool destroy)
* than 10,000 boundary structure elements, discard the boundary array
* entirely and start over next time.
*/
- if (destroy || r->bnd_entries > 10 * 1000) {
+ if (destroy || r->bnd_entries > 10 * WT_THOUSAND) {
for (bnd = r->bnd, i = 0; i < r->bnd_entries; ++bnd, ++i) {
__wt_free(session, bnd->addr.addr);
__wt_free(session, bnd->disk_image);
diff --git a/src/support/err.c b/src/support/err.c
index c4bf4e8946a..de518cbf08b 100644
--- a/src/support/err.c
+++ b/src/support/err.c
@@ -199,7 +199,8 @@ __wt_eventv(WT_SESSION_IMPL *session, bool msg_event, int error,
remain = WT_PTRDIFF(end, p);
wlen = (size_t)snprintf(p, remain,
"[%" PRIuMAX ":%" PRIuMAX "][%s]",
- (uintmax_t)ts.tv_sec, (uintmax_t)ts.tv_nsec / 1000, tid);
+ (uintmax_t)ts.tv_sec,
+ (uintmax_t)ts.tv_nsec / WT_THOUSAND, tid);
p = wlen >= remain ? end : p + wlen;
prefix_cnt = 1;
}