Diffstat (limited to 'src/support')
-rw-r--r--  src/support/mtx_rw.c        | 93
-rw-r--r--  src/support/stat.c          | 20
-rw-r--r--  src/support/thread_group.c  | 83
3 files changed, 104 insertions(+), 92 deletions(-)
diff --git a/src/support/mtx_rw.c b/src/support/mtx_rw.c
index ea18f556257..35ad5da23f2 100644
--- a/src/support/mtx_rw.c
+++ b/src/support/mtx_rw.c
@@ -115,23 +115,27 @@
#include "wt_internal.h"
/*
- * __wt_rwlock_alloc --
- * Allocate and initialize a read/write lock.
+ * __wt_rwlock_init --
+ * Initialize a read/write lock.
*/
-int
-__wt_rwlock_alloc(
- WT_SESSION_IMPL *session, WT_RWLOCK **rwlockp, const char *name)
+void
+__wt_rwlock_init(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- WT_RWLOCK *rwlock;
-
- __wt_verbose(session, WT_VERB_MUTEX, "rwlock: alloc %s", name);
+ WT_UNUSED(session);
- WT_RET(__wt_calloc_one(session, &rwlock));
+ l->u = 0;
+}
- rwlock->name = name;
+/*
+ * __wt_rwlock_destroy --
+ * Destroy a read/write lock.
+ */
+void
+__wt_rwlock_destroy(WT_SESSION_IMPL *session, WT_RWLOCK *l)
+{
+ WT_UNUSED(session);
- *rwlockp = rwlock;
- return (0);
+ l->u = 0;
}
/*
@@ -139,13 +143,12 @@ __wt_rwlock_alloc(
* Try to get a shared lock, fail immediately if unavailable.
*/
int
-__wt_try_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_try_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l, new, old;
+ WT_RWLOCK new, old;
WT_STAT_CONN_INCR(session, rwlock_read);
- l = &rwlock->rwlock;
new = old = *l;
/*
@@ -172,19 +175,15 @@ __wt_try_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* exclusive.
*/
void
-__wt_readlock_spin(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_readlock_spin(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l;
-
- l = &rwlock->rwlock;
-
/*
* Try to get the lock in a single operation if it is available to
* readers. This avoids the situation where multiple readers arrive
* concurrently and have to line up in order to enter the lock. For
* read-heavy workloads it can make a significant difference.
*/
- while (__wt_try_readlock(session, rwlock) != 0) {
+ while (__wt_try_readlock(session, l) != 0) {
if (l->s.writers_active > 0)
__wt_yield();
else
@@ -197,9 +196,8 @@ __wt_readlock_spin(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* Get a shared lock.
*/
void
-__wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l;
uint16_t ticket;
int pause_cnt;
@@ -207,8 +205,6 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
WT_DIAGNOSTIC_YIELD;
- l = &rwlock->rwlock;
-
/*
* Possibly wrap: if we have more than 64K lockers waiting, the ticket
* value will wrap and two lockers will simultaneously be granted the
@@ -246,14 +242,10 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* Release a shared lock.
*/
void
-__wt_readunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_readunlock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l;
-
WT_UNUSED(session);
- l = &rwlock->rwlock;
-
/*
* Increment the writers value (other readers are doing the same, make
* sure we don't race).
@@ -266,13 +258,12 @@ __wt_readunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* Try to get an exclusive lock, fail immediately if unavailable.
*/
int
-__wt_try_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_try_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l, new, old;
+ WT_RWLOCK new, old;
WT_STAT_CONN_INCR(session, rwlock_write);
- l = &rwlock->rwlock;
old = new = *l;
/*
@@ -296,16 +287,13 @@ __wt_try_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* Wait to get an exclusive lock.
*/
void
-__wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l;
uint16_t ticket;
int pause_cnt;
WT_STAT_CONN_INCR(session, rwlock_write);
- l = &rwlock->rwlock;
-
/*
* Possibly wrap: if we have more than 64K lockers waiting, the ticket
* value will wrap and two lockers will simultaneously be granted the
@@ -338,13 +326,12 @@ __wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* Release an exclusive lock.
*/
void
-__wt_writeunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_writeunlock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l, new;
+ WT_RWLOCK new;
WT_UNUSED(session);
- l = &rwlock->rwlock;
(void)__wt_atomic_sub16(&l->s.writers_active, 1);
/*
@@ -368,40 +355,16 @@ __wt_writeunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
WT_DIAGNOSTIC_YIELD;
}
-/*
- * __wt_rwlock_destroy --
- * Destroy a read/write lock.
- */
-void
-__wt_rwlock_destroy(WT_SESSION_IMPL *session, WT_RWLOCK **rwlockp)
-{
- WT_RWLOCK *rwlock;
-
- rwlock = *rwlockp; /* Clear our caller's reference. */
- if (rwlock == NULL)
- return;
- *rwlockp = NULL;
-
- __wt_verbose(
- session, WT_VERB_MUTEX, "rwlock: destroy %s", rwlock->name);
-
- __wt_free(session, rwlock);
-}
-
#ifdef HAVE_DIAGNOSTIC
/*
* __wt_rwlock_islocked --
* Return if a read/write lock is currently locked for reading or writing.
*/
bool
-__wt_rwlock_islocked(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_rwlock_islocked(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l;
-
WT_UNUSED(session);
- l = &rwlock->rwlock;
-
return (l->s.writers != l->s.next || l->s.readers != l->s.next);
}
#endif
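
Taken together, the mtx_rw.c changes replace the heap-allocated lock (alloc/destroy through a WT_RWLOCK **) with a lock the caller embeds directly: initialization just zeroes the word and can no longer fail, and destroy is the same reset. Below is a minimal caller sketch under the new API, assuming the usual wt_internal.h environment; the MY_HANDLE type and its functions are hypothetical, not part of this change.

#include "wt_internal.h"

/*
 * Hypothetical structure embedding the lock by value: there is no
 * separate lock allocation to fail on open or to free on close.
 */
typedef struct {
	WT_RWLOCK rwlock;	/* Embedded lock, not a pointer */
	uint64_t value;		/* Data the lock protects */
} MY_HANDLE;

static void
my_handle_open(WT_SESSION_IMPL *session, MY_HANDLE *h)
{
	__wt_rwlock_init(session, &h->rwlock);	/* Cannot fail */
}

static uint64_t
my_handle_read(WT_SESSION_IMPL *session, MY_HANDLE *h)
{
	uint64_t v;

	__wt_readlock(session, &h->rwlock);	/* Shared access */
	v = h->value;
	__wt_readunlock(session, &h->rwlock);
	return (v);
}

static void
my_handle_close(WT_SESSION_IMPL *session, MY_HANDLE *h)
{
	__wt_rwlock_destroy(session, &h->rwlock);
}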
diff --git a/src/support/stat.c b/src/support/stat.c
index a9c0b24ef29..167d17137ce 100644
--- a/src/support/stat.c
+++ b/src/support/stat.c
@@ -64,6 +64,7 @@ static const char * const __stats_dsrc_desc[] = {
"cache: pages requested from the cache",
"cache: pages written from cache",
"cache: pages written requiring in-memory restoration",
+ "cache: tracked dirty bytes in the cache",
"cache: unmodified pages evicted",
"cache_walk: Average difference between current eviction generation when the page was last considered",
"cache_walk: Average on-disk page image size seen",
@@ -225,6 +226,7 @@ __wt_stat_dsrc_clear_single(WT_DSRC_STATS *stats)
stats->cache_pages_requested = 0;
stats->cache_write = 0;
stats->cache_write_restore = 0;
+ /* not clearing cache_bytes_dirty */
stats->cache_eviction_clean = 0;
/* not clearing cache_state_gen_avg_gap */
/* not clearing cache_state_avg_written_size */
@@ -372,6 +374,7 @@ __wt_stat_dsrc_aggregate_single(
to->cache_pages_requested += from->cache_pages_requested;
to->cache_write += from->cache_write;
to->cache_write_restore += from->cache_write_restore;
+ to->cache_bytes_dirty += from->cache_bytes_dirty;
to->cache_eviction_clean += from->cache_eviction_clean;
to->cache_state_gen_avg_gap += from->cache_state_gen_avg_gap;
to->cache_state_avg_written_size +=
@@ -535,6 +538,7 @@ __wt_stat_dsrc_aggregate(
WT_STAT_READ(from, cache_pages_requested);
to->cache_write += WT_STAT_READ(from, cache_write);
to->cache_write_restore += WT_STAT_READ(from, cache_write_restore);
+ to->cache_bytes_dirty += WT_STAT_READ(from, cache_bytes_dirty);
to->cache_eviction_clean += WT_STAT_READ(from, cache_eviction_clean);
to->cache_state_gen_avg_gap +=
WT_STAT_READ(from, cache_state_gen_avg_gap);
@@ -673,7 +677,11 @@ static const char * const __stats_connection_desc[] = {
"cache: eviction server unable to reach eviction goal",
"cache: eviction state",
"cache: eviction walks abandoned",
+ "cache: eviction worker thread active",
+ "cache: eviction worker thread created",
"cache: eviction worker thread evicting pages",
+ "cache: eviction worker thread removed",
+ "cache: eviction worker thread stable number",
"cache: failed eviction of pages that exceeded the in-memory maximum",
"cache: files with active eviction walks",
"cache: files with new eviction walks started",
@@ -954,7 +962,11 @@ __wt_stat_connection_clear_single(WT_CONNECTION_STATS *stats)
stats->cache_eviction_slow = 0;
/* not clearing cache_eviction_state */
stats->cache_eviction_walks_abandoned = 0;
+ /* not clearing cache_eviction_active_workers */
+ stats->cache_eviction_worker_created = 0;
stats->cache_eviction_worker_evicting = 0;
+ stats->cache_eviction_worker_removed = 0;
+ /* not clearing cache_eviction_stable_state_workers */
stats->cache_eviction_force_fail = 0;
/* not clearing cache_eviction_walks_active */
stats->cache_eviction_walks_started = 0;
@@ -1228,8 +1240,16 @@ __wt_stat_connection_aggregate(
to->cache_eviction_state += WT_STAT_READ(from, cache_eviction_state);
to->cache_eviction_walks_abandoned +=
WT_STAT_READ(from, cache_eviction_walks_abandoned);
+ to->cache_eviction_active_workers +=
+ WT_STAT_READ(from, cache_eviction_active_workers);
+ to->cache_eviction_worker_created +=
+ WT_STAT_READ(from, cache_eviction_worker_created);
to->cache_eviction_worker_evicting +=
WT_STAT_READ(from, cache_eviction_worker_evicting);
+ to->cache_eviction_worker_removed +=
+ WT_STAT_READ(from, cache_eviction_worker_removed);
+ to->cache_eviction_stable_state_workers +=
+ WT_STAT_READ(from, cache_eviction_stable_state_workers);
to->cache_eviction_force_fail +=
WT_STAT_READ(from, cache_eviction_force_fail);
to->cache_eviction_walks_active +=
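
The stat.c hunks are in generated code and follow a fixed convention: event counters are zeroed when statistics are cleared, gauges tracking current state (the fields flagged "not clearing" above, such as cache_eviction_active_workers) survive a clear, and aggregation sums both kinds across per-structure copies. Here is a simplified sketch of that convention; the EXAMPLE_STATS type and its field names are illustrative, not the generated WT_CONNECTION_STATS.

#include <stdint.h>

typedef struct {
	int64_t worker_created;	/* Counter: thread-creation events */
	int64_t active_workers;	/* Gauge: threads running right now */
} EXAMPLE_STATS;

static void
example_stats_clear(EXAMPLE_STATS *stats)
{
	stats->worker_created = 0;
	/*
	 * Not clearing active_workers: it reflects live state, so
	 * zeroing it would misreport the running thread count.
	 */
}

static void
example_stats_aggregate(const EXAMPLE_STATS *from, EXAMPLE_STATS *to)
{
	/* Both kinds sum across copies when aggregating. */
	to->worker_created += from->worker_created;
	to->active_workers += from->active_workers;
}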
diff --git a/src/support/thread_group.c b/src/support/thread_group.c
index a866d2d01c5..beb143e63e2 100644
--- a/src/support/thread_group.c
+++ b/src/support/thread_group.c
@@ -50,8 +50,7 @@ __thread_group_grow(
{
WT_THREAD *thread;
- WT_ASSERT(session,
- __wt_rwlock_islocked(session, group->lock));
+ WT_ASSERT(session, __wt_rwlock_islocked(session, &group->lock));
/*
* Any bounds checking is done by the caller so we know that
@@ -72,20 +71,19 @@ __thread_group_grow(
/*
* __thread_group_shrink --
- * Decrease the number of running threads in the group, and free any
+ * Decrease the number of running threads in the group. Optionally free any
* memory associated with slots larger than the new count.
*/
static int
__thread_group_shrink(WT_SESSION_IMPL *session,
- WT_THREAD_GROUP *group, uint32_t new_count)
+ WT_THREAD_GROUP *group, uint32_t new_count, bool free_thread)
{
WT_DECL_RET;
WT_SESSION *wt_session;
WT_THREAD *thread;
uint32_t current_slot;
- WT_ASSERT(session,
- __wt_rwlock_islocked(session, group->lock));
+ WT_ASSERT(session, __wt_rwlock_islocked(session, &group->lock));
for (current_slot = group->alloc; current_slot > new_count; ) {
/*
@@ -107,14 +105,15 @@ __thread_group_shrink(WT_SESSION_IMPL *session,
WT_TRET(__wt_thread_join(session, thread->tid));
thread->tid = 0;
}
-
- if (thread->session != NULL) {
- wt_session = (WT_SESSION *)thread->session;
- WT_TRET(wt_session->close(wt_session, NULL));
- thread->session = NULL;
+ if (free_thread) {
+ if (thread->session != NULL) {
+ wt_session = (WT_SESSION *)thread->session;
+ WT_TRET(wt_session->close(wt_session, NULL));
+ thread->session = NULL;
+ }
+ __wt_free(session, thread);
+ group->threads[current_slot] = NULL;
}
- __wt_free(session, thread);
- group->threads[current_slot] = NULL;
}
/* Update the thread group state to match our changes */
@@ -142,16 +141,19 @@ __thread_group_resize(
WT_ASSERT(session,
group->current_threads <= group->alloc &&
- __wt_rwlock_islocked(session, group->lock));
+ __wt_rwlock_islocked(session, &group->lock));
if (new_min == group->min && new_max == group->max)
return (0);
+ if (new_min > new_max)
+ return (EINVAL);
+
/*
- * Coll shrink to reduce the number of thread structures and running
+ * Call shrink to reduce the number of thread structures and running
* threads if required by the change in group size.
*/
- WT_RET(__thread_group_shrink(session, group, new_max));
+ WT_RET(__thread_group_shrink(session, group, new_max, true));
/*
* Only reallocate the thread array if it is the largest ever, since
@@ -227,9 +229,9 @@ __wt_thread_group_resize(
" from max: %" PRIu32 " -> %" PRIu32,
(void *)group, group->min, new_min, group->max, new_max);
- __wt_writelock(session, group->lock);
+ __wt_writelock(session, &group->lock);
WT_TRET(__thread_group_resize(session, group, new_min, new_max, flags));
- __wt_writeunlock(session, group->lock);
+ __wt_writeunlock(session, &group->lock);
return (ret);
}
@@ -255,17 +257,17 @@ __wt_thread_group_create(
__wt_verbose(session, WT_VERB_THREAD_GROUP,
"Creating thread group: %p", (void *)group);
- WT_RET(__wt_rwlock_alloc(session, &group->lock, "Thread group"));
+ __wt_rwlock_init(session, &group->lock);
WT_ERR(__wt_cond_alloc(
session, "Thread group cond", false, &group->wait_cond));
cond_alloced = true;
- __wt_writelock(session, group->lock);
+ __wt_writelock(session, &group->lock);
group->run_func = run_func;
group->name = name;
WT_TRET(__thread_group_resize(session, group, min, max, flags));
- __wt_writeunlock(session, group->lock);
+ __wt_writeunlock(session, &group->lock);
/* Cleanup on error to avoid leaking resources */
err: if (ret != 0) {
@@ -288,10 +290,10 @@ __wt_thread_group_destroy(WT_SESSION_IMPL *session, WT_THREAD_GROUP *group)
__wt_verbose(session, WT_VERB_THREAD_GROUP,
"Destroying thread group: %p", (void *)group);
- WT_ASSERT(session, __wt_rwlock_islocked(session, group->lock));
+ WT_ASSERT(session, __wt_rwlock_islocked(session, &group->lock));
/* Shut down all threads and free associated resources. */
- WT_TRET(__thread_group_shrink(session, group, 0));
+ WT_TRET(__thread_group_shrink(session, group, 0, true));
__wt_free(session, group->threads);
@@ -322,15 +324,42 @@ __wt_thread_group_start_one(
return (0);
if (wait)
- __wt_writelock(session, group->lock);
- else if (__wt_try_writelock(session, group->lock) != 0)
- return (0);
+ __wt_writelock(session, &group->lock);
+ else
+ WT_RET(__wt_try_writelock(session, &group->lock));
/* Recheck the bounds now that we hold the lock */
if (group->current_threads < group->max)
WT_TRET(__thread_group_grow(
session, group, group->current_threads + 1));
- __wt_writeunlock(session, group->lock);
+ __wt_writeunlock(session, &group->lock);
+
+ return (ret);
+}
+
+/*
+ * __wt_thread_group_stop_one --
+ * Stop one thread if possible.
+ */
+int
+__wt_thread_group_stop_one(
+ WT_SESSION_IMPL *session, WT_THREAD_GROUP *group, bool wait)
+{
+ WT_DECL_RET;
+
+ if (group->current_threads <= group->min)
+ return (0);
+
+ if (wait)
+ __wt_writelock(session, &group->lock);
+ else
+ WT_RET(__wt_try_writelock(session, &group->lock));
+
+ /* Recheck the bounds now that we hold the lock */
+ if (group->current_threads > group->min)
+ WT_TRET(__thread_group_shrink(
+ session, group, group->current_threads - 1, false));
+ __wt_writeunlock(session, &group->lock);
return (ret);
}
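
The new start_one/stop_one pair lets callers scale a group one thread at a time within its min/max bounds, and the free_thread flag added to shrink means threads stopped this way keep their allocated slots and sessions for a cheap restart. A hypothetical controller sketch follows, again assuming the wt_internal.h environment; the tune_workers function and its busy parameter stand in for a real load measurement.

static int
tune_workers(WT_SESSION_IMPL *session, WT_THREAD_GROUP *group, bool busy)
{
	WT_DECL_RET;

	/*
	 * Pass wait=false so that if the group lock is held the call
	 * returns immediately (EBUSY from the try-lock) instead of
	 * blocking; treat that as "skip this cycle and retry later".
	 */
	if (busy)
		ret = __wt_thread_group_start_one(session, group, false);
	else
		ret = __wt_thread_group_stop_one(session, group, false);
	if (ret == EBUSY)
		ret = 0;
	return (ret);
}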