author    Keith Bostic <keith.bostic@mongodb.com>  2016-12-22 23:12:30 -0500
committer Michael Cahill <michael.cahill@mongodb.com>  2016-12-23 15:12:30 +1100
commit    9216a5b64ec51bc1e381b96fe85345915d8fcaeb (patch)
tree      59db70b4ef3b0e6091e95607c085a3f314eff243 /src/support
parent    3eaa4ea8d458f1a57d3aac916e2bc8a59450af97 (diff)
WT-3093 Reduce the size of WT_PAGE. (#3212)
* Inline read/write locks in their structures the same way we handle
  spinlocks. WiredTiger currently has no need for a separate allocation;
  that was left over from the original POSIX pthread implementation.

* Remove the lock name field from the read/write lock structure,
  shrinking the lock from 16B to 8B. The name field was never used, and
  it should be easy to identify the read/write lock's purpose from the
  enclosing structure. This means we no longer need two separate
  structures (the lock, and the lock plus name), which simplifies the
  actual implementation.

* Reduce the WT_PAGE size by pushing all of the variable-length
  column-store RLE array off-page into a separate allocation (instead of
  just the array itself), and by moving the number-of-entries for the
  leaf pages out of the per-page-type union. The latter change
  simplifies a bunch of stuff: row-store and fixed-length column-store
  no longer require a structure in the union at all, and lots of the
  #defines to handle that go away.

* Move WT_ITEM.flags to the end of the structure; there's no reason to
  leave it in the middle anymore, and it's stylistically odd.
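The first two bullets can be pictured as a single 64-bit word overlaid
with four 16-bit ticket fields; setting l->u to 0 clears all of them in
one store, which is why the new init and destroy functions in the diff
below are identical and can no longer fail. A minimal sketch of the
inlined layout, reconstructed from the fields this diff touches (l->u,
l->s.writers, l->s.readers, l->s.next, l->s.writers_active); the exact
field order and comments here are assumptions:

typedef union {                          /* Read/write lock */
        uint64_t u;                      /* Whole lock, for init/destroy */
        struct {
                uint16_t writers;        /* Now-serving ticket, writers */
                uint16_t readers;        /* Now-serving ticket, readers */
                uint16_t next;           /* Next available ticket number */
                uint16_t writers_active; /* Count of active writers */
        } s;
} WT_RWLOCK;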
Diffstat (limited to 'src/support')
-rw-r--r--  src/support/mtx_rw.c        |  93
-rw-r--r--  src/support/thread_group.c  |  26
2 files changed, 40 insertions(+), 79 deletions(-)
diff --git a/src/support/mtx_rw.c b/src/support/mtx_rw.c
index ea18f556257..35ad5da23f2 100644
--- a/src/support/mtx_rw.c
+++ b/src/support/mtx_rw.c
@@ -115,23 +115,27 @@
#include "wt_internal.h"
/*
- * __wt_rwlock_alloc --
- * Allocate and initialize a read/write lock.
+ * __wt_rwlock_init --
+ * Initialize a read/write lock.
*/
-int
-__wt_rwlock_alloc(
- WT_SESSION_IMPL *session, WT_RWLOCK **rwlockp, const char *name)
+void
+__wt_rwlock_init(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- WT_RWLOCK *rwlock;
-
- __wt_verbose(session, WT_VERB_MUTEX, "rwlock: alloc %s", name);
+ WT_UNUSED(session);
- WT_RET(__wt_calloc_one(session, &rwlock));
+ l->u = 0;
+}
- rwlock->name = name;
+/*
+ * __wt_rwlock_destroy --
+ * Destroy a read/write lock.
+ */
+void
+__wt_rwlock_destroy(WT_SESSION_IMPL *session, WT_RWLOCK *l)
+{
+ WT_UNUSED(session);
- *rwlockp = rwlock;
- return (0);
+ l->u = 0;
}
/*
@@ -139,13 +143,12 @@ __wt_rwlock_alloc(
* Try to get a shared lock, fail immediately if unavailable.
*/
int
-__wt_try_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_try_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l, new, old;
+ WT_RWLOCK new, old;
WT_STAT_CONN_INCR(session, rwlock_read);
- l = &rwlock->rwlock;
new = old = *l;
/*
@@ -172,19 +175,15 @@ __wt_try_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* exclusive.
*/
void
-__wt_readlock_spin(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_readlock_spin(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l;
-
- l = &rwlock->rwlock;
-
/*
* Try to get the lock in a single operation if it is available to
* readers. This avoids the situation where multiple readers arrive
* concurrently and have to line up in order to enter the lock. For
* read-heavy workloads it can make a significant difference.
*/
- while (__wt_try_readlock(session, rwlock) != 0) {
+ while (__wt_try_readlock(session, l) != 0) {
if (l->s.writers_active > 0)
__wt_yield();
else
@@ -197,9 +196,8 @@ __wt_readlock_spin(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* Get a shared lock.
*/
void
-__wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l;
uint16_t ticket;
int pause_cnt;
@@ -207,8 +205,6 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
WT_DIAGNOSTIC_YIELD;
- l = &rwlock->rwlock;
-
/*
* Possibly wrap: if we have more than 64K lockers waiting, the ticket
* value will wrap and two lockers will simultaneously be granted the
@@ -246,14 +242,10 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* Release a shared lock.
*/
void
-__wt_readunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_readunlock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l;
-
WT_UNUSED(session);
- l = &rwlock->rwlock;
-
/*
* Increment the writers value (other readers are doing the same, make
* sure we don't race).
@@ -266,13 +258,12 @@ __wt_readunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* Try to get an exclusive lock, fail immediately if unavailable.
*/
int
-__wt_try_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_try_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l, new, old;
+ WT_RWLOCK new, old;
WT_STAT_CONN_INCR(session, rwlock_write);
- l = &rwlock->rwlock;
old = new = *l;
/*
@@ -296,16 +287,13 @@ __wt_try_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* Wait to get an exclusive lock.
*/
void
-__wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l;
uint16_t ticket;
int pause_cnt;
WT_STAT_CONN_INCR(session, rwlock_write);
- l = &rwlock->rwlock;
-
/*
* Possibly wrap: if we have more than 64K lockers waiting, the ticket
* value will wrap and two lockers will simultaneously be granted the
@@ -338,13 +326,12 @@ __wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* Release an exclusive lock.
*/
void
-__wt_writeunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_writeunlock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l, new;
+ WT_RWLOCK new;
WT_UNUSED(session);
- l = &rwlock->rwlock;
(void)__wt_atomic_sub16(&l->s.writers_active, 1);
/*
@@ -368,40 +355,16 @@ __wt_writeunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
WT_DIAGNOSTIC_YIELD;
}
-/*
- * __wt_rwlock_destroy --
- * Destroy a read/write lock.
- */
-void
-__wt_rwlock_destroy(WT_SESSION_IMPL *session, WT_RWLOCK **rwlockp)
-{
- WT_RWLOCK *rwlock;
-
- rwlock = *rwlockp; /* Clear our caller's reference. */
- if (rwlock == NULL)
- return;
- *rwlockp = NULL;
-
- __wt_verbose(
- session, WT_VERB_MUTEX, "rwlock: destroy %s", rwlock->name);
-
- __wt_free(session, rwlock);
-}
-
#ifdef HAVE_DIAGNOSTIC
/*
* __wt_rwlock_islocked --
* Return if a read/write lock is currently locked for reading or writing.
*/
bool
-__wt_rwlock_islocked(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+__wt_rwlock_islocked(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
- wt_rwlock_t *l;
-
WT_UNUSED(session);
- l = &rwlock->rwlock;
-
return (l->s.writers != l->s.next || l->s.readers != l->s.next);
}
#endif
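Taken together, the mtx_rw.c hunks implement a ticket lock: a reader or
writer takes a ticket from l->s.next and waits for the matching
now-serving field to reach it. A minimal sketch of the read path under
the layout assumed above; the atomic helper names mirror the
__wt_atomic_sub16 call visible in this diff, and WiredTiger's real loop
adds pause/yield backoff and statistics:

/* Acquire a shared lock (sketch, not the exact WiredTiger code). */
static void
readlock_sketch(WT_RWLOCK *l)
{
        uint16_t ticket;

        /* Take a ticket; 16-bit math wraps at 64K, per the comment above. */
        ticket = __wt_atomic_fetch_add16(&l->s.next, 1);

        /* Wait until the readers now-serving value reaches our ticket. */
        while (ticket != l->s.readers)
                __wt_yield();

        /* Readers share the lock: immediately admit the next reader. */
        (void)__wt_atomic_add16(&l->s.readers, 1);
}

/*
 * Release a shared lock: advance the writers now-serving value (other
 * exiting readers do the same, so the update is atomic); a waiting
 * writer is granted the lock only after every earlier reader has left.
 */
static void
readunlock_sketch(WT_RWLOCK *l)
{
        (void)__wt_atomic_add16(&l->s.writers, 1);
}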
diff --git a/src/support/thread_group.c b/src/support/thread_group.c
index a866d2d01c5..a89468c367a 100644
--- a/src/support/thread_group.c
+++ b/src/support/thread_group.c
@@ -50,8 +50,7 @@ __thread_group_grow(
{
WT_THREAD *thread;
- WT_ASSERT(session,
- __wt_rwlock_islocked(session, group->lock));
+ WT_ASSERT(session, __wt_rwlock_islocked(session, &group->lock));
/*
* Any bounds checking is done by the caller so we know that
@@ -84,8 +83,7 @@ __thread_group_shrink(WT_SESSION_IMPL *session,
WT_THREAD *thread;
uint32_t current_slot;
- WT_ASSERT(session,
- __wt_rwlock_islocked(session, group->lock));
+ WT_ASSERT(session, __wt_rwlock_islocked(session, &group->lock));
for (current_slot = group->alloc; current_slot > new_count; ) {
/*
@@ -142,7 +140,7 @@ __thread_group_resize(
WT_ASSERT(session,
group->current_threads <= group->alloc &&
- __wt_rwlock_islocked(session, group->lock));
+ __wt_rwlock_islocked(session, &group->lock));
if (new_min == group->min && new_max == group->max)
return (0);
@@ -227,9 +225,9 @@ __wt_thread_group_resize(
" from max: %" PRIu32 " -> %" PRIu32,
(void *)group, group->min, new_min, group->max, new_max);
- __wt_writelock(session, group->lock);
+ __wt_writelock(session, &group->lock);
WT_TRET(__thread_group_resize(session, group, new_min, new_max, flags));
- __wt_writeunlock(session, group->lock);
+ __wt_writeunlock(session, &group->lock);
return (ret);
}
@@ -255,17 +253,17 @@ __wt_thread_group_create(
__wt_verbose(session, WT_VERB_THREAD_GROUP,
"Creating thread group: %p", (void *)group);
- WT_RET(__wt_rwlock_alloc(session, &group->lock, "Thread group"));
+ __wt_rwlock_init(session, &group->lock);
WT_ERR(__wt_cond_alloc(
session, "Thread group cond", false, &group->wait_cond));
cond_alloced = true;
- __wt_writelock(session, group->lock);
+ __wt_writelock(session, &group->lock);
group->run_func = run_func;
group->name = name;
WT_TRET(__thread_group_resize(session, group, min, max, flags));
- __wt_writeunlock(session, group->lock);
+ __wt_writeunlock(session, &group->lock);
/* Cleanup on error to avoid leaking resources */
err: if (ret != 0) {
@@ -288,7 +286,7 @@ __wt_thread_group_destroy(WT_SESSION_IMPL *session, WT_THREAD_GROUP *group)
__wt_verbose(session, WT_VERB_THREAD_GROUP,
"Destroying thread group: %p", (void *)group);
- WT_ASSERT(session, __wt_rwlock_islocked(session, group->lock));
+ WT_ASSERT(session, __wt_rwlock_islocked(session, &group->lock));
/* Shut down all threads and free associated resources. */
WT_TRET(__thread_group_shrink(session, group, 0));
@@ -322,15 +320,15 @@ __wt_thread_group_start_one(
return (0);
if (wait)
- __wt_writelock(session, group->lock);
- else if (__wt_try_writelock(session, group->lock) != 0)
+ __wt_writelock(session, &group->lock);
+ else if (__wt_try_writelock(session, &group->lock) != 0)
return (0);
/* Recheck the bounds now that we hold the lock */
if (group->current_threads < group->max)
WT_TRET(__thread_group_grow(
session, group, group->current_threads + 1));
- __wt_writeunlock(session, group->lock);
+ __wt_writeunlock(session, &group->lock);
return (ret);
}
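For downstream callers, the conversion pattern is the one shown in
thread_group.c: embed the lock, initialize it in place, and pass it by
address; init and destroy can no longer fail. A hypothetical example
(my_example_struct and example_locked_op are illustrative, not part of
this change):

struct my_example_struct {
        WT_RWLOCK lock;         /* Embedded; no separate allocation */
};

static void
example_locked_op(WT_SESSION_IMPL *session, struct my_example_struct *s)
{
        /* Was: WT_RET(__wt_rwlock_alloc(session, &lockp, "name")); */
        __wt_rwlock_init(session, &s->lock);

        __wt_writelock(session, &s->lock);
        /* ... critical section ... */
        __wt_writeunlock(session, &s->lock);

        /* Was: pointer-to-pointer form that freed the allocation. */
        __wt_rwlock_destroy(session, &s->lock);
}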