diff options
author    Michael Cahill <michael.cahill@mongodb.com>  2016-11-02 13:58:58 +1100
committer David Hows <howsdav@gmail.com>               2016-11-02 13:58:58 +1100
commit    b90338afdc09c50cf3a321a9778c8e650fc3d6a1 (patch)
tree      284481880acee2060643b016219abd9dbb42bd3e
parent    938020666a9a6b5d2ae6f224af05435948fde5b0 (diff)
download  mongo-b90338afdc09c50cf3a321a9778c8e650fc3d6a1.tar.gz
SERVER-26753 Don't spin on a read-lock in a tight loop. (#3113)
* SERVER-26753 Don't spin on a read-lock in a tight loop.
We could be starving a thread that we are waiting on of CPU.
* Switch to yielding on rwlocks rather than sleeping.
* Revert the attempt to be clever when spinning on rwlocks.
* Spin getting a read lock while it is available to readers.
-rw-r--r--  src/support/mtx_rw.c | 18
-rw-r--r--  src/txn/txn.c        | 11
2 files changed, 14 insertions(+), 15 deletions(-)
diff --git a/src/support/mtx_rw.c b/src/support/mtx_rw.c
index fefa346ac14..4a2d596c994 100644
--- a/src/support/mtx_rw.c
+++ b/src/support/mtx_rw.c
@@ -173,7 +173,7 @@ __wt_try_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
 void
 __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
 {
-	wt_rwlock_t *l;
+	wt_rwlock_t *l, old;
 	uint16_t ticket;
 	int pause_cnt;
@@ -183,6 +183,15 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)

 	l = &rwlock->rwlock;

+	/* Be optimistic when lock is available to readers. */
+	old = *l;
+	while (old.s.readers == old.s.next) {
+		if (__wt_try_readlock(session, rwlock) == 0)
+			return;
+		WT_PAUSE();
+		old = *l;
+	}
+
 	/*
 	 * Possibly wrap: if we have more than 64K lockers waiting, the ticket
 	 * value will wrap and two lockers will simultaneously be granted the
@@ -192,17 +201,14 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
 	for (pause_cnt = 0; ticket != l->s.readers;) {
 		/*
 		 * We failed to get the lock; pause before retrying and if we've
-		 * paused enough, sleep so we don't burn CPU to no purpose. This
+		 * paused enough, yield so we don't burn CPU to no purpose. This
 		 * situation happens if there are more threads than cores in the
 		 * system and we're thrashing on shared resources.
-		 *
-		 * Don't sleep long when waiting on a read lock, hopefully we're
-		 * waiting on another read thread to increment the reader count.
 		 */
 		if (++pause_cnt < WT_THOUSAND)
 			WT_PAUSE();
 		else
-			__wt_sleep(0, 10);
+			__wt_yield();
 	}

diff --git a/src/txn/txn.c b/src/txn/txn.c
index d60ea73c660..3f128637970 100644
--- a/src/txn/txn.c
+++ b/src/txn/txn.c
@@ -112,7 +112,6 @@ int
 __wt_txn_get_snapshot(WT_SESSION_IMPL *session)
 {
 	WT_CONNECTION_IMPL *conn;
-	WT_DECL_RET;
 	WT_TXN *txn;
 	WT_TXN_GLOBAL *txn_global;
 	WT_TXN_STATE *s, *txn_state;
@@ -126,14 +125,8 @@ __wt_txn_get_snapshot(WT_SESSION_IMPL *session)
 	txn_state = WT_SESSION_TXN_STATE(session);
 	n = 0;

-	/*
-	 * Spin waiting for the lock: the sleeps in our blocking readlock
-	 * implementation are too slow for scanning the transaction table.
-	 */
-	while ((ret =
-	    __wt_try_readlock(session, txn_global->scan_rwlock)) == EBUSY)
-		WT_PAUSE();
-	WT_RET(ret);
+	/* We're going to scan the table: wait for the lock. */
+	__wt_readlock(session, txn_global->scan_rwlock);

 	current_id = pinned_id = txn_global->current;
 	prev_oldest_id = txn_global->oldest_id;