summary refs log tree commit diff
diff options
context:
space:
mode:
authorMichael Cahill <mjc@wiredtiger.com>2013-12-11 17:50:58 -0800
committerMichael Cahill <mjc@wiredtiger.com>2013-12-11 17:50:58 -0800
commitad9a551aeceb89258e42996cc944725dd9b78fd4 (patch)
tree298c3421b766c8e508aa91c7053e3559a7a963db
parentb7a31c4930cbcffbe6a51d229d24a2744f5afae3 (diff)
parent405d7e623811b8c86c06702782d0ee6c4c5ac928 (diff)
downloadmongo-ad9a551aeceb89258e42996cc944725dd9b78fd4.tar.gz
Merge pull request #806 from wiredtiger/lsm-chunk-flags-atomic
Switch to atomic flags for LSM chunks, to avoid racing when setting merge flags
-rw-r--r--src/include/lsm.h2
-rw-r--r--src/lsm/lsm_cursor.c34
-rw-r--r--src/lsm/lsm_merge.c17
-rw-r--r--src/lsm/lsm_meta.c13
-rw-r--r--src/lsm/lsm_stat.c7
-rw-r--r--src/lsm/lsm_tree.c24
-rw-r--r--src/lsm/lsm_worker.c20
7 files changed, 63 insertions, 54 deletions
diff --git a/src/include/lsm.h b/src/include/lsm.h
index 9000e4cb226..fbd6b66a1a8 100644
--- a/src/include/lsm.h
+++ b/src/include/lsm.h
@@ -65,7 +65,7 @@ struct __wt_lsm_chunk {
#define WT_LSM_CHUNK_MERGING 0x08
#define WT_LSM_CHUNK_ONDISK 0x10
#define WT_LSM_CHUNK_STABLE 0x20
- uint32_t flags;
+ uint32_t flags_atomic;
} WT_GCC_ATTRIBUTE((aligned(WT_CACHE_LINE_ALIGNMENT)));
/*
diff --git a/src/lsm/lsm_cursor.c b/src/lsm/lsm_cursor.c
index 14c1f634b25..95ab908e507 100644
--- a/src/lsm/lsm_cursor.c
+++ b/src/lsm/lsm_cursor.c
@@ -258,8 +258,8 @@ __clsm_open_cursors(
* metadata inconsistent.
*/
if (update && (lsm_tree->nchunks == 0 ||
- F_ISSET(lsm_tree->chunk[lsm_tree->nchunks - 1],
- WT_LSM_CHUNK_ONDISK))) {
+ (chunk = lsm_tree->chunk[lsm_tree->nchunks - 1]) == NULL ||
+ F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK))) {
/* Release our lock because switch will get a write lock. */
locked = 0;
WT_ERR(__wt_lsm_tree_unlock(session, lsm_tree));
@@ -342,13 +342,13 @@ retry: if (F_ISSET(clsm, WT_CLSM_MERGE)) {
checkpoint = ((WT_CURSOR_BTREE *)*cp)->
btree->dhandle->checkpoint;
if (checkpoint == NULL &&
- F_ISSET(chunk, WT_LSM_CHUNK_ONDISK) &&
- !F_ISSET(chunk, WT_LSM_CHUNK_EMPTY))
+ F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK) &&
+ !F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_EMPTY))
break;
/* Make sure the Bloom config matches. */
if (clsm->blooms[ngood] == NULL &&
- F_ISSET(chunk, WT_LSM_CHUNK_BLOOM))
+ F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM))
break;
}
@@ -406,23 +406,25 @@ retry: if (F_ISSET(clsm, WT_CLSM_MERGE)) {
*/
WT_ASSERT(session, *cp == NULL);
ret = __wt_open_cursor(session, chunk->uri, c,
- (F_ISSET(chunk, WT_LSM_CHUNK_ONDISK) &&
- !F_ISSET(chunk, WT_LSM_CHUNK_EMPTY)) ? ckpt_cfg : NULL, cp);
+ (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK) &&
+ !F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_EMPTY)) ?
+ ckpt_cfg : NULL, cp);
/*
* XXX kludge: we may have an empty chunk where no checkpoint
* was written. If so, try to open the ordinary handle on that
* chunk instead.
*/
- if (ret == WT_NOTFOUND && F_ISSET(chunk, WT_LSM_CHUNK_ONDISK)) {
+ if (ret == WT_NOTFOUND &&
+ F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK)) {
ret = __wt_open_cursor(
session, chunk->uri, c, NULL, cp);
if (ret == 0)
- F_SET(chunk, WT_LSM_CHUNK_EMPTY);
+ F_SET_ATOMIC(chunk, WT_LSM_CHUNK_EMPTY);
}
WT_ERR(ret);
- if (F_ISSET(chunk, WT_LSM_CHUNK_BLOOM) &&
+ if (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM) &&
!F_ISSET(clsm, WT_CLSM_MERGE))
WT_ERR(__wt_bloom_open(session, chunk->bloom_uri,
lsm_tree->bloom_bit_count,
@@ -434,7 +436,7 @@ retry: if (F_ISSET(clsm, WT_CLSM_MERGE)) {
}
/* The last chunk is our new primary. */
- if (chunk != NULL && !F_ISSET(chunk, WT_LSM_CHUNK_ONDISK)) {
+ if (chunk != NULL && !F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK)) {
clsm->primary_chunk = chunk;
primary = clsm->cursors[clsm->nchunks - 1];
WT_WITH_BTREE(session, ((WT_CURSOR_BTREE *)(primary))->btree,
@@ -459,13 +461,13 @@ err: F_CLR(session, WT_SESSION_NO_CACHE_CHECK);
checkpoint = ((WT_CURSOR_BTREE *)*cp)->
btree->dhandle->checkpoint;
WT_ASSERT(session,
- (F_ISSET(chunk, WT_LSM_CHUNK_ONDISK) &&
- !F_ISSET(chunk, WT_LSM_CHUNK_EMPTY)) ?
+ (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK) &&
+ !F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_EMPTY)) ?
checkpoint != NULL : checkpoint == NULL);
/* Make sure the Bloom config matches. */
WT_ASSERT(session,
- (F_ISSET(chunk, WT_LSM_CHUNK_BLOOM) &&
+ (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM) &&
!F_ISSET(clsm, WT_CLSM_MERGE)) ?
clsm->blooms[i] != NULL : clsm->blooms[i] == NULL);
}
@@ -1079,9 +1081,9 @@ __clsm_put(WT_SESSION_IMPL *session,
lsm_tree = clsm->lsm_tree;
- WT_ASSERT(session, clsm->primary_chunk != NULL);
- WT_ASSERT(session, !F_ISSET(clsm->primary_chunk, WT_LSM_CHUNK_ONDISK));
WT_ASSERT(session,
+ clsm->primary_chunk != NULL &&
+ !F_ISSET_ATOMIC(clsm->primary_chunk, WT_LSM_CHUNK_ONDISK) &&
TXNID_LE(session->txn.id, clsm->primary_chunk->txnid_max));
/*
diff --git a/src/lsm/lsm_merge.c b/src/lsm/lsm_merge.c
index 67a063aafc1..dd84df01f7e 100644
--- a/src/lsm/lsm_merge.c
+++ b/src/lsm/lsm_merge.c
@@ -106,8 +106,9 @@ __wt_lsm_merge(
*/
end_chunk = lsm_tree->nchunks - 1;
while (end_chunk > 0 &&
- (!F_ISSET(lsm_tree->chunk[end_chunk], WT_LSM_CHUNK_BLOOM) ||
- F_ISSET(lsm_tree->chunk[end_chunk], WT_LSM_CHUNK_MERGING)))
+ ((chunk = lsm_tree->chunk[end_chunk]) == NULL ||
+ !F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM) ||
+ F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_MERGING)))
--end_chunk;
/*
@@ -140,7 +141,7 @@ __wt_lsm_merge(
nchunks = (end_chunk + 1) - start_chunk;
/* If the chunk is already involved in a merge, stop. */
- if (F_ISSET(chunk, WT_LSM_CHUNK_MERGING))
+ if (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_MERGING))
break;
/*
@@ -171,12 +172,12 @@ __wt_lsm_merge(
break;
}
- F_SET(chunk, WT_LSM_CHUNK_MERGING);
+ F_SET_ATOMIC(chunk, WT_LSM_CHUNK_MERGING);
record_count += chunk->count;
--start_chunk;
if (nchunks == lsm_tree->merge_max) {
- F_CLR(youngest, WT_LSM_CHUNK_MERGING);
+ F_CLR_ATOMIC(youngest, WT_LSM_CHUNK_MERGING);
record_count -= youngest->count;
chunk_size -= youngest->size;
--end_chunk;
@@ -199,7 +200,7 @@ __wt_lsm_merge(
chunk->generation >
youngest->generation + max_generation_gap) {
for (i = 0; i < nchunks; i++)
- F_CLR(lsm_tree->chunk[start_chunk + i],
+ F_CLR_ATOMIC(lsm_tree->chunk[start_chunk + i],
WT_LSM_CHUNK_MERGING);
nchunks = 0;
}
@@ -353,10 +354,10 @@ __wt_lsm_merge(
session, lsm_tree, start_chunk, nchunks, chunk);
if (create_bloom)
- F_SET(chunk, WT_LSM_CHUNK_BLOOM);
+ F_SET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM);
chunk->count = insert_count;
chunk->generation = generation;
- F_SET(chunk, WT_LSM_CHUNK_ONDISK);
+ F_SET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK);
ret = __wt_lsm_meta_write(session, lsm_tree);
lsm_tree->dsk_gen++;
diff --git a/src/lsm/lsm_meta.c b/src/lsm/lsm_meta.c
index e9951fae099..60c83bb7877 100644
--- a/src/lsm/lsm_meta.c
+++ b/src/lsm/lsm_meta.c
@@ -100,7 +100,8 @@ __wt_lsm_meta_read(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
lsm_tree, chunk->id, &buf));
chunk->uri =
__wt_buf_steal(session, &buf, NULL);
- F_SET(chunk, WT_LSM_CHUNK_EVICTED |
+ F_SET_ATOMIC(chunk,
+ WT_LSM_CHUNK_EVICTED |
WT_LSM_CHUNK_ONDISK |
WT_LSM_CHUNK_STABLE);
} else if (WT_STRING_MATCH(
@@ -109,7 +110,7 @@ __wt_lsm_meta_read(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
lsm_tree, chunk->id, &buf));
chunk->bloom_uri =
__wt_buf_steal(session, &buf, NULL);
- F_SET(chunk, WT_LSM_CHUNK_BLOOM);
+ F_SET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM);
continue;
} else if (WT_STRING_MATCH(
"chunk_size", lk.str, lk.len)) {
@@ -134,7 +135,7 @@ __wt_lsm_meta_read(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
if (WT_STRING_MATCH("bloom", lk.str, lk.len)) {
WT_ERR(__wt_strndup(session,
lv.str, lv.len, &chunk->bloom_uri));
- F_SET(chunk, WT_LSM_CHUNK_BLOOM);
+ F_SET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM);
continue;
}
WT_ERR(__wt_realloc_def(session,
@@ -144,7 +145,7 @@ __wt_lsm_meta_read(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
lsm_tree->old_chunks[nchunks++] = chunk;
WT_ERR(__wt_strndup(session,
lk.str, lk.len, &chunk->uri));
- F_SET(chunk, WT_LSM_CHUNK_ONDISK);
+ F_SET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK);
}
WT_ERR_NOTFOUND_OK(ret);
lsm_tree->nold_chunks = nchunks;
@@ -199,7 +200,7 @@ __wt_lsm_meta_write(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
if (i > 0)
WT_ERR(__wt_buf_catfmt(session, buf, ","));
WT_ERR(__wt_buf_catfmt(session, buf, "id=%" PRIu32, chunk->id));
- if (F_ISSET(chunk, WT_LSM_CHUNK_BLOOM))
+ if (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM))
WT_ERR(__wt_buf_catfmt(session, buf, ",bloom"));
if (chunk->size != 0)
WT_ERR(__wt_buf_catfmt(session, buf,
@@ -221,7 +222,7 @@ __wt_lsm_meta_write(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
else
WT_ERR(__wt_buf_catfmt(session, buf, ","));
WT_ERR(__wt_buf_catfmt(session, buf, "\"%s\"", chunk->uri));
- if (F_ISSET(chunk, WT_LSM_CHUNK_BLOOM))
+ if (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM))
WT_ERR(__wt_buf_catfmt(
session, buf, ",bloom=\"%s\"", chunk->bloom_uri));
}
diff --git a/src/lsm/lsm_stat.c b/src/lsm/lsm_stat.c
index cb3d2bba2ac..8bea73624a0 100644
--- a/src/lsm/lsm_stat.c
+++ b/src/lsm/lsm_stat.c
@@ -71,9 +71,10 @@ __lsm_stat_init(WT_SESSION_IMPL *session, const char *uri, WT_CURSOR_STAT *cst)
WT_ERR(__wt_buf_fmt(
session, uribuf, "statistics:%s", chunk->uri));
ret = __wt_curstat_open(session, uribuf->data,
- F_ISSET(chunk, WT_LSM_CHUNK_ONDISK) ? disk_cfg : cfg,
+ F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK) ? disk_cfg : cfg,
&stat_cursor);
- if (ret == WT_NOTFOUND && F_ISSET(chunk, WT_LSM_CHUNK_ONDISK))
+ if (ret == WT_NOTFOUND &&
+ F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK))
ret = __wt_curstat_open(
session, uribuf->data, cfg, &stat_cursor);
WT_ERR(ret);
@@ -97,7 +98,7 @@ __lsm_stat_init(WT_SESSION_IMPL *session, const char *uri, WT_CURSOR_STAT *cst)
__wt_stat_aggregate_dsrc_stats(new, stats);
WT_ERR(stat_cursor->close(stat_cursor));
- if (!F_ISSET(chunk, WT_LSM_CHUNK_BLOOM))
+ if (!F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM))
continue;
/* Maintain a count of bloom filters. */
diff --git a/src/lsm/lsm_tree.c b/src/lsm/lsm_tree.c
index 3502845ee48..82bc662aa97 100644
--- a/src/lsm/lsm_tree.c
+++ b/src/lsm/lsm_tree.c
@@ -587,11 +587,11 @@ __wt_lsm_tree_throttle(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
for (i = in_memory = 0, cp = lsm_tree->chunk + lsm_tree->nchunks - 1;
i < lsm_tree->nchunks;
++i, --cp)
- if (!F_ISSET(*cp, WT_LSM_CHUNK_ONDISK)) {
+ if (!F_ISSET_ATOMIC(*cp, WT_LSM_CHUNK_ONDISK)) {
record_count += (*cp)->count;
++in_memory;
} else if ((*cp)->generation == 0 ||
- F_ISSET(*cp, WT_LSM_CHUNK_STABLE))
+ F_ISSET_ATOMIC(*cp, WT_LSM_CHUNK_STABLE))
break;
chunk = lsm_tree->chunk[lsm_tree->nchunks - 1];
@@ -599,7 +599,7 @@ __wt_lsm_tree_throttle(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
if (!F_ISSET(lsm_tree, WT_LSM_TREE_THROTTLE) || in_memory <= 3)
lsm_tree->throttle_sleep = 0;
else if (i == lsm_tree->nchunks ||
- F_ISSET(*cp, WT_LSM_CHUNK_STABLE)) {
+ F_ISSET_ATOMIC(*cp, WT_LSM_CHUNK_STABLE)) {
/*
* No checkpoint has completed this run. Keep slowing down
* inserts until one does.
@@ -635,7 +635,8 @@ __wt_lsm_tree_throttle(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
* period, we can calculate a crazy value.
*/
if (in_memory > 1 &&
- i != lsm_tree->nchunks && !F_ISSET(*cp, WT_LSM_CHUNK_STABLE)) {
+ i != lsm_tree->nchunks &&
+ !F_ISSET_ATOMIC(*cp, WT_LSM_CHUNK_STABLE)) {
prev_chunk = lsm_tree->chunk[lsm_tree->nchunks - 2];
WT_ASSERT(session, prev_chunk->generation == 0);
WT_ASSERT(session,
@@ -669,7 +670,8 @@ __wt_lsm_tree_switch(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
* for a lock.
*/
if ((nchunks = lsm_tree->nchunks) != 0 &&
- !F_ISSET(lsm_tree->chunk[nchunks - 1], WT_LSM_CHUNK_ONDISK) &&
+ (chunk = lsm_tree->chunk[nchunks - 1]) != NULL &&
+ !F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK) &&
!F_ISSET(lsm_tree, WT_LSM_TREE_NEED_SWITCH))
goto err;
@@ -732,7 +734,7 @@ __wt_lsm_tree_drop(
for (i = 0; i < lsm_tree->nchunks; i++) {
chunk = lsm_tree->chunk[i];
WT_ERR(__wt_schema_drop(session, chunk->uri, cfg));
- if (F_ISSET(chunk, WT_LSM_CHUNK_BLOOM))
+ if (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM))
WT_ERR(
__wt_schema_drop(session, chunk->bloom_uri, cfg));
}
@@ -742,7 +744,7 @@ __wt_lsm_tree_drop(
if ((chunk = lsm_tree->old_chunks[i]) == NULL)
continue;
WT_ERR(__wt_schema_drop(session, chunk->uri, cfg));
- if (F_ISSET(chunk, WT_LSM_CHUNK_BLOOM))
+ if (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM))
WT_ERR(
__wt_schema_drop(session, chunk->bloom_uri, cfg));
}
@@ -802,13 +804,13 @@ __wt_lsm_tree_rename(WT_SESSION_IMPL *session,
WT_ERR(__wt_schema_rename(session, old, chunk->uri, cfg));
__wt_free(session, old);
- if (F_ISSET(chunk, WT_LSM_CHUNK_BLOOM)) {
+ if (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM)) {
old = chunk->bloom_uri;
chunk->bloom_uri = NULL;
WT_ERR(__wt_lsm_tree_bloom_name(
session, lsm_tree, chunk->id, &buf));
chunk->bloom_uri = __wt_buf_steal(session, &buf, NULL);
- F_SET(chunk, WT_LSM_CHUNK_BLOOM);
+ F_SET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM);
WT_ERR(__wt_schema_rename(
session, old, chunk->uri, cfg));
__wt_free(session, old);
@@ -1006,12 +1008,12 @@ __wt_lsm_tree_worker(WT_SESSION_IMPL *session,
for (i = 0; i < lsm_tree->nchunks; i++) {
chunk = lsm_tree->chunk[i];
if (file_func == __wt_checkpoint &&
- F_ISSET(chunk, WT_LSM_CHUNK_ONDISK))
+ F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK))
continue;
WT_ERR(__wt_schema_worker(session, chunk->uri,
file_func, name_func, cfg, open_flags));
if (name_func == __wt_backup_list_uri_append &&
- F_ISSET(chunk, WT_LSM_CHUNK_BLOOM))
+ F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM))
WT_ERR(__wt_schema_worker(session, chunk->bloom_uri,
file_func, name_func, cfg, open_flags));
}
diff --git a/src/lsm/lsm_worker.c b/src/lsm/lsm_worker.c
index 3cfb4119b85..6f2f3651201 100644
--- a/src/lsm/lsm_worker.c
+++ b/src/lsm/lsm_worker.c
@@ -204,8 +204,9 @@ __lsm_bloom_work(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
* Skip if a thread is still active in the chunk or it
* isn't suitable.
*/
- if (!F_ISSET(chunk, WT_LSM_CHUNK_ONDISK) ||
- F_ISSET(chunk, WT_LSM_CHUNK_BLOOM | WT_LSM_CHUNK_MERGING) ||
+ if (!F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK) ||
+ F_ISSET_ATOMIC(chunk,
+ WT_LSM_CHUNK_BLOOM | WT_LSM_CHUNK_MERGING) ||
chunk->generation > 0 ||
chunk->count == 0)
continue;
@@ -275,13 +276,14 @@ __wt_lsm_checkpoint_worker(void *arg)
* is also evicted. Either way, there is no point
* trying to checkpoint it again.
*/
- if (F_ISSET(chunk, WT_LSM_CHUNK_ONDISK)) {
- if (F_ISSET(chunk, WT_LSM_CHUNK_EVICTED))
+ if (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK)) {
+ if (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_EVICTED))
continue;
if ((ret = __lsm_discard_handle(
session, chunk->uri, NULL)) == 0)
- F_SET(chunk, WT_LSM_CHUNK_EVICTED);
+ F_SET_ATOMIC(
+ chunk, WT_LSM_CHUNK_EVICTED);
else if (ret == EBUSY)
ret = 0;
else
@@ -367,7 +369,7 @@ __wt_lsm_checkpoint_worker(void *arg)
++j;
WT_ERR(__wt_lsm_tree_lock(session, lsm_tree, 1));
- F_SET(chunk, WT_LSM_CHUNK_ONDISK);
+ F_SET_ATOMIC(chunk, WT_LSM_CHUNK_ONDISK);
ret = __wt_lsm_meta_write(session, lsm_tree);
++lsm_tree->dsk_gen;
@@ -483,7 +485,7 @@ __lsm_bloom_create(WT_SESSION_IMPL *session,
/* Ensure the bloom filter is in the metadata. */
WT_ERR(__wt_lsm_tree_lock(session, lsm_tree, 1));
- F_SET(chunk, WT_LSM_CHUNK_BLOOM);
+ F_SET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM);
ret = __wt_lsm_meta_write(session, lsm_tree);
++lsm_tree->dsk_gen;
WT_TRET(__wt_lsm_tree_unlock(session, lsm_tree));
@@ -606,7 +608,7 @@ __lsm_free_chunks(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
continue;
}
- if (F_ISSET(chunk, WT_LSM_CHUNK_BLOOM)) {
+ if (F_ISSET_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM)) {
/*
* An EBUSY return is acceptable - a cursor may still
* be positioned on this old chunk.
@@ -621,7 +623,7 @@ __lsm_free_chunks(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
} else
WT_ERR(ret);
- F_CLR(chunk, WT_LSM_CHUNK_BLOOM);
+ F_CLR_ATOMIC(chunk, WT_LSM_CHUNK_BLOOM);
}
if (chunk->uri != NULL) {
/*