summaryrefslogtreecommitdiff
path: root/src/session/session_dhandle.c
diff options
context:
space:
mode:
authorAlex Gorrod <alexander.gorrod@mongodb.com>2017-02-01 12:11:48 +1100
committerGitHub <noreply@github.com>2017-02-01 12:11:48 +1100
commit1e24579efee68f6fdb6a4c582275a50d95d7eb81 (patch)
tree04dbe334b08a79a3e1cd1294a43602c2fbf59a07 /src/session/session_dhandle.c
parent0a70661a0d33c9705509955baafded2855054a29 (diff)
downloadmongo-1e24579efee68f6fdb6a4c582275a50d95d7eb81.tar.gz
WT-3115 Convert the dhandle list lock into a read/write lock. (#3236)
It was a spinlock, but most acquirers only need shared access and it can be a contention point in many-table workloads. Split uses of the handle list lock into small operations. In particular, only hold the handle list lock to get the "next" handle, not for loops over all the handles in the system. Update statistics around handle list lock and corresponding doc.
Diffstat (limited to 'src/session/session_dhandle.c')
-rw-r--r--src/session/session_dhandle.c43
1 file changed, 25 insertions, 18 deletions
diff --git a/src/session/session_dhandle.c b/src/session/session_dhandle.c
index f1251794b89..ee9bddbfc19 100644
--- a/src/session/session_dhandle.c
+++ b/src/session/session_dhandle.c
@@ -44,8 +44,7 @@ __session_discard_dhandle(
TAILQ_REMOVE(&session->dhandles, dhandle_cache, q);
TAILQ_REMOVE(&session->dhhash[bucket], dhandle_cache, hashq);
- (void)__wt_atomic_sub32(&dhandle_cache->dhandle->session_ref, 1);
-
+ WT_DHANDLE_RELEASE(dhandle_cache->dhandle);
__wt_overwrite_and_free(session, dhandle_cache);
}
@@ -412,17 +411,27 @@ __session_dhandle_sweep(WT_SESSION_IMPL *session)
/*
* __session_find_shared_dhandle --
* Search for a data handle in the connection and add it to a session's
- * cache. Since the data handle isn't locked, this must be called holding
- * the handle list lock, and we must increment the handle's reference
- * count before releasing it.
+ * cache. We must increment the handle's reference count while holding
+ * the handle list lock.
*/
static int
__session_find_shared_dhandle(
WT_SESSION_IMPL *session, const char *uri, const char *checkpoint)
{
- WT_RET(__wt_conn_dhandle_find(session, uri, checkpoint));
- (void)__wt_atomic_add32(&session->dhandle->session_ref, 1);
- return (0);
+ WT_DECL_RET;
+
+ WT_WITH_HANDLE_LIST_READ_LOCK(session,
+ if ((ret = __wt_conn_dhandle_find(session, uri, checkpoint)) == 0)
+ WT_DHANDLE_ACQUIRE(session->dhandle));
+
+ if (ret != WT_NOTFOUND)
+ return (ret);
+
+ WT_WITH_HANDLE_LIST_WRITE_LOCK(session,
+ if ((ret = __wt_conn_dhandle_alloc(session, uri, checkpoint)) == 0)
+ WT_DHANDLE_ACQUIRE(session->dhandle));
+
+ return (ret);
}
/*
@@ -450,16 +459,16 @@ __session_get_dhandle(
* We didn't find a match in the session cache, search the shared
* handle list and cache the handle we find.
*/
- WT_WITH_HANDLE_LIST_LOCK(session,
- ret = __session_find_shared_dhandle(session, uri, checkpoint));
- WT_RET(ret);
+ WT_RET(__session_find_shared_dhandle(session, uri, checkpoint));
/*
* Fixup the reference count on failure (we incremented the reference
* count while holding the handle-list lock).
*/
- if ((ret = __session_add_dhandle(session)) != 0)
- (void)__wt_atomic_sub32(&session->dhandle->session_ref, 1);
+ if ((ret = __session_add_dhandle(session)) != 0) {
+ WT_DHANDLE_RELEASE(session->dhandle);
+ session->dhandle = NULL;
+ }
return (ret);
}
@@ -505,17 +514,15 @@ __wt_session_get_btree(WT_SESSION_IMPL *session,
* reopen handles in the meantime. A combination of the schema
* and handle list locks are used to enforce this.
*/
- if (!F_ISSET(session, WT_SESSION_LOCKED_SCHEMA) ||
- !F_ISSET(session, WT_SESSION_LOCKED_HANDLE_LIST)) {
+ if (!F_ISSET(session, WT_SESSION_LOCKED_SCHEMA)) {
dhandle->excl_session = NULL;
dhandle->excl_ref = 0;
F_CLR(dhandle, WT_DHANDLE_EXCLUSIVE);
__wt_writeunlock(session, &dhandle->rwlock);
WT_WITH_SCHEMA_LOCK(session,
- WT_WITH_HANDLE_LIST_LOCK(session,
- ret = __wt_session_get_btree(
- session, uri, checkpoint, cfg, flags)));
+ ret = __wt_session_get_btree(
+ session, uri, checkpoint, cfg, flags));
return (ret);
}