author		Michael Cahill <michael.cahill@wiredtiger.com>	2012-11-16 15:19:33 +1100
committer	Michael Cahill <michael.cahill@wiredtiger.com>	2012-11-16 15:19:33 +1100
commit		2b528273769314e61f753e838881a22b95947984 (patch)
tree		ae3112e086de2cf7764409879cc2cd03a1acac91
parent		6375da7e522aea5180c7459d14011f27e40ac397 (diff)
Switch the LSM tree lock to a read/write lock, so cursors can read the state of the tree in parallel.
-rw-r--r--	src/include/lsm.h	1
-rw-r--r--	src/lsm/lsm_cursor.c	12
-rw-r--r--	src/lsm/lsm_merge.c	8
-rw-r--r--	src/lsm/lsm_stat.c	4
-rw-r--r--	src/lsm/lsm_tree.c	21
-rw-r--r--	src/lsm/lsm_worker.c	14
6 files changed, 29 insertions, 31 deletions
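
As the commit message says, the point of the change is that cursor opens (__clsm_open_cursors) and the statistics walk (__wt_lsm_stat_init) only read the tree's chunk list, so they now take the read side of the lock and can run in parallel, while chunk switches, merges, and the worker threads take the write side when they change the structure. Below is a minimal, self-contained sketch of that reader/writer split, using POSIX pthread_rwlock_t as a stand-in for WiredTiger's internal __wt_readlock/__wt_writelock/__wt_rwunlock wrappers; the reader/switcher names and the bare dsk_gen counter are illustrative only.

/*
 * Sketch only: pthread_rwlock_t stands in for the rwlock used by the real
 * code.  Build with -lpthread.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_rwlock_t tree_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static uint64_t dsk_gen;			/* stands in for lsm_tree->dsk_gen */

/* Cursor-open path: many readers may inspect the tree state at once. */
static void *
reader(void *arg)
{
	uint64_t gen;

	(void)arg;
	pthread_rwlock_rdlock(&tree_rwlock);	/* cf. __wt_readlock */
	gen = dsk_gen;				/* read a consistent snapshot */
	pthread_rwlock_unlock(&tree_rwlock);	/* cf. __wt_rwunlock */
	printf("reader saw generation %llu\n", (unsigned long long)gen);
	return (NULL);
}

/* Switch/merge path: a single writer excludes readers while it updates. */
static void *
switcher(void *arg)
{
	(void)arg;
	pthread_rwlock_wrlock(&tree_rwlock);	/* cf. __wt_writelock */
	++dsk_gen;				/* structural change to the tree */
	pthread_rwlock_unlock(&tree_rwlock);
	return (NULL);
}

int
main(void)
{
	pthread_t r1, r2, w;

	pthread_create(&r1, NULL, reader, NULL);
	pthread_create(&r2, NULL, reader, NULL);
	pthread_create(&w, NULL, switcher, NULL);
	pthread_join(r1, NULL);
	pthread_join(r2, NULL);
	pthread_join(w, NULL);
	return (0);
}

Unlike the spinlock this replaces, the two reader threads can hold the lock at the same time; only the switcher serializes against everyone else, which matches the read/write split in the hunks below.
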
diff --git a/src/include/lsm.h b/src/include/lsm.h
index 190cf51c513..4d4ee8b2326 100644
--- a/src/include/lsm.h
+++ b/src/include/lsm.h
@@ -67,7 +67,6 @@ struct __wt_lsm_tree {
WT_LSM_STATS *stats; /* LSM statistics */
- WT_SPINLOCK lock;
uint64_t dsk_gen;
uint32_t *memsizep;
diff --git a/src/lsm/lsm_cursor.c b/src/lsm/lsm_cursor.c
index 21511d20b2a..ef5b5cf360d 100644
--- a/src/lsm/lsm_cursor.c
+++ b/src/lsm/lsm_cursor.c
@@ -139,7 +139,7 @@ __clsm_open_cursors(
WT_RET(__clsm_close_cursors(clsm));
- __wt_spin_lock(session, &lsm_tree->lock);
+ __wt_readlock(session, lsm_tree->rwlock);
/* Merge cursors have already figured out how many chunks they need. */
if (F_ISSET(clsm, WT_CLSM_MERGE)) {
@@ -218,7 +218,7 @@ __clsm_open_cursors(
}
clsm->dsk_gen = lsm_tree->dsk_gen;
-err: __wt_spin_unlock(session, &lsm_tree->lock);
+err: __wt_rwunlock(session, lsm_tree->rwlock);
return (ret);
}
@@ -781,11 +781,11 @@ __clsm_put(
* chunk is needed.
*/
if (clsm->primary_chunk == NULL) {
- __wt_spin_lock(session, &lsm_tree->lock);
+ __wt_writelock(session, lsm_tree->rwlock);
if (clsm->dsk_gen == lsm_tree->dsk_gen)
WT_WITH_SCHEMA_LOCK(session,
ret = __wt_lsm_tree_switch(session, lsm_tree));
- __wt_spin_unlock(session, &lsm_tree->lock);
+ __wt_rwunlock(session, lsm_tree->rwlock);
WT_RET(ret);
/* We changed the structure, or someone else did: update. */
@@ -830,12 +830,12 @@ __clsm_put(
* Take the LSM lock first: we can't acquire it while
* holding the schema lock, or we will deadlock.
*/
- __wt_spin_lock(session, &lsm_tree->lock);
+ __wt_writelock(session, lsm_tree->rwlock);
/* Make sure we don't race. */
if (clsm->dsk_gen == lsm_tree->dsk_gen)
WT_WITH_SCHEMA_LOCK(session,
ret = __wt_lsm_tree_switch(session, lsm_tree));
- __wt_spin_unlock(session, &lsm_tree->lock);
+ __wt_rwunlock(session, lsm_tree->rwlock);
}
return (ret);
diff --git a/src/lsm/lsm_merge.c b/src/lsm/lsm_merge.c
index 91ddbc087ad..9e5c6d1c7a9 100644
--- a/src/lsm/lsm_merge.c
+++ b/src/lsm/lsm_merge.c
@@ -98,7 +98,7 @@ __wt_lsm_merge(
* avoid holding it while the merge is in progress: that may take a
* long time.
*/
- __wt_spin_lock(session, &lsm_tree->lock);
+ __wt_writelock(session, lsm_tree->rwlock);
/*
* Only include chunks that are stable on disk and not involved in a
@@ -177,7 +177,7 @@ __wt_lsm_merge(
generation = lsm_tree->chunk[i]->generation;
start_id = lsm_tree->chunk[start_chunk]->id;
- __wt_spin_unlock(session, &lsm_tree->lock);
+ __wt_rwunlock(session, lsm_tree->rwlock);
if (nchunks == 0)
return (WT_NOTFOUND);
@@ -254,7 +254,7 @@ __wt_lsm_merge(
}
WT_ERR(ret);
- __wt_spin_lock(session, &lsm_tree->lock);
+ __wt_writelock(session, lsm_tree->rwlock);
/*
* Check whether we raced with another merge, and adjust the chunk
@@ -278,7 +278,7 @@ __wt_lsm_merge(
F_SET(chunk, WT_LSM_CHUNK_ONDISK);
ret = __wt_lsm_meta_write(session, lsm_tree);
- __wt_spin_unlock(session, &lsm_tree->lock);
+ __wt_rwunlock(session, lsm_tree->rwlock);
err: if (src != NULL)
WT_TRET(src->close(src));
diff --git a/src/lsm/lsm_stat.c b/src/lsm/lsm_stat.c
index fe0717f3436..31bd7adf156 100644
--- a/src/lsm/lsm_stat.c
+++ b/src/lsm/lsm_stat.c
@@ -48,7 +48,7 @@ __wt_lsm_stat_init(
WT_STAT_SET(lsm_tree->stats, generation_max, 0);
/* Hold the LSM lock so that we can safely walk through the chunks. */
- __wt_spin_lock(session, &lsm_tree->lock);
+ __wt_readlock(session, lsm_tree->rwlock);
/* Set the stats for this run. */
WT_STAT_SET(lsm_tree->stats, chunk_count, lsm_tree->nchunks);
@@ -145,7 +145,7 @@ __wt_lsm_stat_init(
WT_ERR(stat_cursor->close(stat_cursor));
}
-err: __wt_spin_unlock(session, &lsm_tree->lock);
+err: __wt_rwunlock(session, lsm_tree->rwlock);
__wt_scr_free(&uribuf);
return (ret);
diff --git a/src/lsm/lsm_tree.c b/src/lsm/lsm_tree.c
index e2a712be54a..da19dae231b 100644
--- a/src/lsm/lsm_tree.c
+++ b/src/lsm/lsm_tree.c
@@ -35,7 +35,6 @@ __lsm_tree_discard(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
__wt_rwlock_destroy(session, &lsm_tree->rwlock);
__wt_free(session, lsm_tree->stats);
- __wt_spin_destroy(session, &lsm_tree->lock);
for (i = 0; i < lsm_tree->nchunks; i++) {
if ((chunk = lsm_tree->chunk[i]) == NULL)
@@ -423,7 +422,7 @@ __lsm_tree_open(
/* Try to open the tree. */
WT_RET(__wt_calloc_def(session, 1, &lsm_tree));
- __wt_spin_init(session, &lsm_tree->lock);
+ WT_ERR(__wt_rwlock_alloc(session, "lsm tree", &lsm_tree->rwlock));
WT_ERR(__wt_strdup(session, uri, &lsm_tree->name));
lsm_tree->filename = lsm_tree->name + strlen("lsm:");
WT_ERR(__wt_stat_alloc_lsm_stats(session, &lsm_tree->stats));
@@ -555,7 +554,7 @@ __wt_lsm_tree_drop(
WT_RET(__lsm_tree_close(session, lsm_tree));
/* Prevent any new opens. */
- WT_RET(__wt_spin_trylock(session, &lsm_tree->lock));
+ WT_RET(__wt_try_writelock(session, lsm_tree->rwlock));
/* Drop the chunks. */
for (i = 0; i < lsm_tree->nchunks; i++) {
@@ -575,11 +574,11 @@ __wt_lsm_tree_drop(
__wt_schema_drop(session, chunk->bloom_uri, cfg));
}
- __wt_spin_unlock(session, &lsm_tree->lock);
+ __wt_rwunlock(session, lsm_tree->rwlock);
WT_ERR(__wt_metadata_remove(session, name));
if (0) {
-err: __wt_spin_unlock(session, &lsm_tree->lock);
+err: __wt_rwunlock(session, lsm_tree->rwlock);
}
__lsm_tree_discard(session, lsm_tree);
return (ret);
@@ -610,7 +609,7 @@ __wt_lsm_tree_rename(WT_SESSION_IMPL *session,
WT_RET(__lsm_tree_close(session, lsm_tree));
/* Prevent any new opens. */
- WT_RET(__wt_spin_trylock(session, &lsm_tree->lock));
+ WT_RET(__wt_try_writelock(session, lsm_tree->rwlock));
/* Set the new name. */
__wt_free(session, lsm_tree->name);
@@ -641,12 +640,12 @@ __wt_lsm_tree_rename(WT_SESSION_IMPL *session,
}
}
- __wt_spin_unlock(session, &lsm_tree->lock);
+ __wt_rwunlock(session, lsm_tree->rwlock);
WT_ERR(__wt_lsm_meta_write(session, lsm_tree));
WT_ERR(__wt_metadata_remove(session, oldname));
if (0) {
-err: __wt_spin_unlock(session, &lsm_tree->lock);
+err: __wt_rwunlock(session, lsm_tree->rwlock);
}
if (old != NULL)
__wt_free(session, old);
@@ -675,7 +674,7 @@ __wt_lsm_tree_truncate(
WT_RET(__lsm_tree_close(session, lsm_tree));
/* Prevent any new opens. */
- WT_RET(__wt_spin_trylock(session, &lsm_tree->lock));
+ WT_RET(__wt_try_writelock(session, lsm_tree->rwlock));
/* Create the new chunk. */
WT_ERR(__wt_calloc_def(session, 1, &chunk));
@@ -689,11 +688,11 @@ __wt_lsm_tree_truncate(
WT_ERR(__wt_lsm_meta_write(session, lsm_tree));
WT_ERR(__lsm_tree_start_worker(session, lsm_tree));
- __wt_spin_unlock(session, &lsm_tree->lock);
+ __wt_rwunlock(session, lsm_tree->rwlock);
__wt_lsm_tree_release(session, lsm_tree);
if (0) {
-err: __wt_spin_unlock(session, &lsm_tree->lock);
+err: __wt_rwunlock(session, lsm_tree->rwlock);
__lsm_tree_discard(session, lsm_tree);
}
return (ret);
diff --git a/src/lsm/lsm_worker.c b/src/lsm/lsm_worker.c
index 54a8298e877..429c38d39ed 100644
--- a/src/lsm/lsm_worker.c
+++ b/src/lsm/lsm_worker.c
@@ -112,10 +112,10 @@ __wt_lsm_bloom_worker(void *arg)
}
++j;
- __wt_spin_lock(session, &lsm_tree->lock);
+ __wt_writelock(session, lsm_tree->rwlock);
++lsm_tree->dsk_gen;
ret = __wt_lsm_meta_write(session, lsm_tree);
- __wt_spin_unlock(session, &lsm_tree->lock);
+ __wt_rwunlock(session, lsm_tree->rwlock);
if (ret != 0) {
(void)__wt_err(session, ret,
@@ -224,9 +224,9 @@ __wt_lsm_copy_chunks(WT_SESSION_IMPL *session,
/* Always return zero chunks on error. */
cookie->nchunks = 0;
- __wt_spin_lock(session, &lsm_tree->lock);
+ __wt_readlock(session, lsm_tree->rwlock);
if (!F_ISSET(lsm_tree, WT_LSM_TREE_WORKING)) {
- __wt_spin_unlock(session, &lsm_tree->lock);
+ __wt_rwunlock(session, lsm_tree->rwlock);
/* The actual error value is ignored. */
return (WT_ERROR);
}
@@ -245,7 +245,7 @@ __wt_lsm_copy_chunks(WT_SESSION_IMPL *session,
if (ret == 0 && nchunks > 0)
memcpy(cookie->chunk_array, lsm_tree->chunk,
nchunks * sizeof(*lsm_tree->chunk));
- __wt_spin_unlock(session, &lsm_tree->lock);
+ __wt_rwunlock(session, lsm_tree->rwlock);
if (ret == 0)
cookie->nchunks = nchunks;
@@ -332,7 +332,7 @@ __lsm_free_chunks(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
if (!locked) {
locked = 1;
/* TODO: Do we need the lsm_tree lock for all drops? */
- __wt_spin_lock(session, &lsm_tree->lock);
+ __wt_writelock(session, lsm_tree->rwlock);
}
if (F_ISSET(chunk, WT_LSM_CHUNK_BLOOM)) {
WT_WITH_SCHEMA_LOCK(session, ret = __wt_schema_drop(
@@ -375,7 +375,7 @@ __lsm_free_chunks(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
}
if (locked) {
err: WT_TRET(__wt_lsm_meta_write(session, lsm_tree));
- __wt_spin_unlock(session, &lsm_tree->lock);
+ __wt_rwunlock(session, lsm_tree->rwlock);
}
/* Returning non-zero means there is no work to do. */
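
One more pattern worth noting from lsm_tree.c above: drop, rename, and truncate replace __wt_spin_trylock with __wt_try_writelock, so the operation fails immediately (rather than blocking) if the lock is already held, and once the write lock is taken no cursor can acquire the read side, which is what the "prevent any new opens" comments refer to. A hedged sketch of that fail-fast acquisition, again with pthread_rwlock_trywrlock standing in for the WiredTiger wrapper and exclusive_op as a made-up name for illustration:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t tree_rwlock = PTHREAD_RWLOCK_INITIALIZER;

/* Illustrative stand-in for the drop/rename/truncate paths. */
static int
exclusive_op(void)
{
	int ret;

	/* Fail fast instead of waiting if readers or a writer are active. */
	if ((ret = pthread_rwlock_trywrlock(&tree_rwlock)) != 0)
		return (ret);			/* typically EBUSY */

	/* ... drop chunks, rename files, or truncate the tree here ... */

	pthread_rwlock_unlock(&tree_rwlock);
	return (0);
}

int
main(void)
{
	int ret;

	if ((ret = exclusive_op()) != 0)
		fprintf(stderr, "tree busy: error %d (EBUSY is %d)\n", ret, EBUSY);
	return (0);
}

Failing fast here mirrors the old __wt_spin_trylock behaviour, so these exclusive schema operations never stall behind long-lived readers.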