Diffstat (limited to 'src/support/mtx_rw.c')
 src/support/mtx_rw.c | 40 +++++++++++++++++++++++++++++++++-------
 1 file changed, 33 insertions(+), 7 deletions(-)
diff --git a/src/support/mtx_rw.c b/src/support/mtx_rw.c
index fefa346ac14..ea18f556257 100644
--- a/src/support/mtx_rw.c
+++ b/src/support/mtx_rw.c
@@ -167,6 +167,32 @@ __wt_try_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
}
/*
+ * __wt_readlock_spin --
+ * Spin to get a read lock: only yield the CPU if the lock is held
+ * exclusive.
+ */
+void
+__wt_readlock_spin(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
+{
+ wt_rwlock_t *l;
+
+ l = &rwlock->rwlock;
+
+ /*
+ * Try to get the lock in a single operation if it is available to
+ * readers. This avoids the situation where multiple readers arrive
+ * concurrently and have to line up in order to enter the lock. For
+ * read-heavy workloads it can make a significant difference.
+ */
+ while (__wt_try_readlock(session, rwlock) != 0) {
+ if (l->s.writers_active > 0)
+ __wt_yield();
+ else
+ WT_PAUSE();
+ }
+}
+
+/*
* __wt_readlock --
* Get a shared lock.
*/
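To see the new fast path end to end, here is a minimal, self-contained sketch of the same idea in portable C, using GCC/Clang atomic builtins. Everything in it is an illustrative assumption modeled on this diff, not WiredTiger's actual definitions: the sketch_* names are invented, the field layout mirrors what the diff implies about wt_rwlock_t (16-bit writers/readers/next tickets plus the new writers_active count packed into one 64-bit word), and CPU_PAUSE/sched_yield stand in for WT_PAUSE and __wt_yield. The write-side paths are sketched after the later hunks; read-unlock, which would advance the writers ticket, is elided to keep this short.

#include <sched.h>
#include <stdbool.h>
#include <stdint.h>

#if defined(__x86_64__) || defined(__i386__)
#define CPU_PAUSE() __builtin_ia32_pause()   /* stand-in for WT_PAUSE */
#else
#define CPU_PAUSE() ((void)0)
#endif

typedef union {
    uint64_t u;                      /* whole-word view: one CAS covers all fields */
    struct {
        uint16_t writers;            /* ticket currently served to writers */
        uint16_t readers;            /* ticket currently served to readers */
        uint16_t next;               /* next ticket to hand out */
        uint16_t writers_active;     /* writers holding or waiting for the lock */
    } s;
} sketch_rwlock_t;

/*
 * Single-operation read-lock attempt: it can only succeed when this
 * thread's ticket would be served immediately, i.e. nobody is queued.
 * Acquiring allocates a ticket and serves it in the same CAS.
 */
static bool
sketch_try_readlock(sketch_rwlock_t *l)
{
    sketch_rwlock_t new, old;

    old.u = __atomic_load_n(&l->u, __ATOMIC_ACQUIRE);
    if (old.s.readers != old.s.next)
        return (false);

    new = old;
    new.s.readers = new.s.next = (uint16_t)(old.s.next + 1);
    return (__atomic_compare_exchange_n(&l->u, &old.u, new.u,
        false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}

/*
 * Spin for a read lock, yielding the CPU only when a writer holds or
 * wants the lock; with only readers ahead, the wait is expected to be
 * a handful of cycles, so stay on the CPU and retry.
 */
static void
sketch_readlock_spin(sketch_rwlock_t *l)
{
    while (!sketch_try_readlock(l)) {
        if (__atomic_load_n(&l->s.writers_active, __ATOMIC_RELAXED) > 0)
            (void)sched_yield();
        else
            CPU_PAUSE();
    }
}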
@@ -192,17 +218,14 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
for (pause_cnt = 0; ticket != l->s.readers;) {
/*
* We failed to get the lock; pause before retrying and if we've
- * paused enough, sleep so we don't burn CPU to no purpose. This
+ * paused enough, yield so we don't burn CPU to no purpose. This
* situation happens if there are more threads than cores in the
* system and we're thrashing on shared resources.
- *
- * Don't sleep long when waiting on a read lock, hopefully we're
- * waiting on another read thread to increment the reader count.
*/
if (++pause_cnt < WT_THOUSAND)
WT_PAUSE();
else
- __wt_sleep(0, 10);
+ __wt_yield();
}
/*
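Extracted as a standalone helper, the reworked wait policy in the blocking paths looks like the sketch below (continuing the invented sketch_* naming; 1000 stands in for WT_THOUSAND). The point of the change: a fixed 10-microsecond sleep can stretch a reader-to-reader handoff far past the typical hold time, while a yield returns as soon as the scheduler has nothing better to run, yet still stops us burning a core when threads outnumber CPUs.

/*
 * Bounded spin, then yield: busy-wait on the assumption the holder is
 * another reader about to bump the count, fall back to yielding once
 * the system looks oversubscribed and we're thrashing shared resources.
 */
static void
sketch_backoff(int *pause_cnt)
{
    if (++*pause_cnt < 1000)
        CPU_PAUSE();
    else
        (void)sched_yield();
}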
@@ -264,6 +287,7 @@ __wt_try_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
/* The replacement lock value is a result of allocating a new ticket. */
++new.s.next;
+ ++new.s.writers_active;
return (__wt_atomic_cas64(&l->u, old.u, new.u) ? 0 : EBUSY);
}
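Because the new counter is packed into the same 64-bit word as the tickets (at least in this sketch's assumed layout), the try path can allocate the ticket and advertise the writer in a single compare-and-swap: no reader can ever observe the ticket allocated without writers_active raised. A sketch of the same shape:

/*
 * Single-operation write-lock attempt: allocate the ticket and raise
 * writers_active in one CAS, so spinning readers switch to yielding
 * the moment the lock is taken.
 */
static bool
sketch_try_writelock(sketch_rwlock_t *l)
{
    sketch_rwlock_t new, old;

    old.u = __atomic_load_n(&l->u, __ATOMIC_ACQUIRE);
    if (old.s.writers != old.s.next)
        return (false);

    new = old;
    ++new.s.next;            /* allocate our ticket; writers already equals it */
    ++new.s.writers_active;  /* readers now prefer yielding to pausing */
    return (__atomic_compare_exchange_n(&l->u, &old.u, new.u,
        false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}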
@@ -288,6 +312,7 @@ __wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
* lock.
*/
ticket = __wt_atomic_fetch_add16(&l->s.next, 1);
+ (void)__wt_atomic_add16(&l->s.writers_active, 1);
for (pause_cnt = 0; ticket != l->s.writers;) {
/*
* We failed to get the lock; pause before retrying and if we've
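In the blocking path the ticket comes from a fetch-and-add, so the writers_active increment has to be a second atomic operation, as sketched below. The two updates are not atomic together, but the window is harmless: writers_active only steers the pause-versus-yield choice in __wt_readlock_spin, it never gates correctness.

/*
 * Blocking write lock: take a ticket, advertise the writer, then wait
 * for the ticket to be served.
 */
static void
sketch_writelock(sketch_rwlock_t *l)
{
    uint16_t ticket;
    int pause_cnt;

    ticket = __atomic_fetch_add(&l->s.next, 1, __ATOMIC_RELAXED);
    (void)__atomic_add_fetch(&l->s.writers_active, 1, __ATOMIC_RELAXED);

    for (pause_cnt = 0;
        ticket != __atomic_load_n(&l->s.writers, __ATOMIC_ACQUIRE);)
        sketch_backoff(&pause_cnt);
}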
@@ -319,14 +344,15 @@ __wt_writeunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
WT_UNUSED(session);
+ l = &rwlock->rwlock;
+ (void)__wt_atomic_sub16(&l->s.writers_active, 1);
+
/*
* Ensure that all updates made while the lock was held are visible to
* the next thread to acquire the lock.
*/
WT_WRITE_BARRIER();
- l = &rwlock->rwlock;
-
new = *l;
/*
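Finally, the release side, completing the sketch. Mirroring the diff's ordering, the writer count drops before the barrier that publishes the critical section, and hoisting l = &rwlock->rwlock above the barrier is simply a prerequisite for that; since __wt_readlock_spin reads the counter only as a scheduling hint, a relaxed decrement is enough.

/*
 * Release the write lock: drop the writer count, make the critical
 * section's writes visible, then serve the next ticket. Only the lock
 * holder updates the served counters, so field-wise stores are safe;
 * any concurrent CAS on the whole word simply fails and retries.
 */
static void
sketch_writeunlock(sketch_rwlock_t *l)
{
    uint16_t ticket;

    (void)__atomic_sub_fetch(&l->s.writers_active, 1, __ATOMIC_RELAXED);

    /* Everything written under the lock must reach the next holder. */
    __atomic_thread_fence(__ATOMIC_RELEASE);

    ticket = l->s.writers;   /* our ticket: stable while we hold the lock */
    __atomic_store_n(&l->s.readers, (uint16_t)(ticket + 1), __ATOMIC_RELAXED);
    __atomic_store_n(&l->s.writers, (uint16_t)(ticket + 1), __ATOMIC_RELEASE);
}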