author     Andrew Morton <andrew.morton@mongodb.com>  2023-05-16 22:21:25 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-05-17 00:50:01 +0000
commit     376eae9eefe9f752d8d9b5189528f1353c019e54 (patch)
tree       8780daf7a9ebd0ed2e8c1b25bf81571b3bb89911
parent     e94168acd89dba4faef5e43f970633a781e8cba2 (diff)
download   mongo-376eae9eefe9f752d8d9b5189528f1353c019e54.tar.gz
Import wiredtiger: 04dcc362042b8cc35fdd8de8343be3a15d4fa47c from branch mongodb-master
ref: d310d6330e..04dcc36204 for: 7.1.0-rc0 WT-10898 Add assertions for owning a spinlock
-rw-r--r--  src/third_party/wiredtiger/dist/s_funcs.list  2
-rw-r--r--  src/third_party/wiredtiger/import.data  2
-rw-r--r--  src/third_party/wiredtiger/src/block/block_ckpt.c  18
-rw-r--r--  src/third_party/wiredtiger/src/block/block_compact.c  4
-rw-r--r--  src/third_party/wiredtiger/src/block/block_ext.c  16
-rw-r--r--  src/third_party/wiredtiger/src/block/block_write.c  8
-rw-r--r--  src/third_party/wiredtiger/src/block_cache/block_chunkcache.c  5
-rw-r--r--  src/third_party/wiredtiger/src/btree/bt_compact.c  8
-rw-r--r--  src/third_party/wiredtiger/src/btree/bt_sync.c  2
-rw-r--r--  src/third_party/wiredtiger/src/btree/bt_sync_obsolete.c  1
-rw-r--r--  src/third_party/wiredtiger/src/btree/bt_upgrade.c  3
-rw-r--r--  src/third_party/wiredtiger/src/btree/bt_vrfy.c  3
-rw-r--r--  src/third_party/wiredtiger/src/conn/conn_cache_pool.c  34
-rw-r--r--  src/third_party/wiredtiger/src/conn/conn_log.c  7
-rw-r--r--  src/third_party/wiredtiger/src/conn/conn_tiered.c  3
-rw-r--r--  src/third_party/wiredtiger/src/cursor/cur_backup.c  6
-rw-r--r--  src/third_party/wiredtiger/src/evict/evict_lru.c  6
-rw-r--r--  src/third_party/wiredtiger/src/log/log.c  15
-rw-r--r--  src/third_party/wiredtiger/src/lsm/lsm_cursor.c  2
-rw-r--r--  src/third_party/wiredtiger/src/lsm/lsm_stat.c  2
-rw-r--r--  src/third_party/wiredtiger/src/lsm/lsm_tree.c  7
-rw-r--r--  src/third_party/wiredtiger/src/lsm/lsm_work_unit.c  3
-rw-r--r--  src/third_party/wiredtiger/src/meta/meta_turtle.c  2
-rw-r--r--  src/third_party/wiredtiger/src/optrack/optrack.c  6
-rw-r--r--  src/third_party/wiredtiger/src/os_common/os_fhandle.c  1
-rw-r--r--  src/third_party/wiredtiger/src/os_common/os_fs_inmemory.c  8
-rw-r--r--  src/third_party/wiredtiger/src/reconcile/rec_write.c  6
-rw-r--r--  src/third_party/wiredtiger/src/rollback_to_stable/rts_api.c  3
-rw-r--r--  src/third_party/wiredtiger/src/schema/schema_alter.c  6
-rw-r--r--  src/third_party/wiredtiger/src/schema/schema_create.c  9
-rw-r--r--  src/third_party/wiredtiger/src/schema/schema_drop.c  20
-rw-r--r--  src/third_party/wiredtiger/src/schema/schema_rename.c  10
-rw-r--r--  src/third_party/wiredtiger/src/schema/schema_truncate.c  3
-rw-r--r--  src/third_party/wiredtiger/src/session/session_api.c  3
-rw-r--r--  src/third_party/wiredtiger/src/session/session_compact.c  2
-rw-r--r--  src/third_party/wiredtiger/src/txn/txn_ckpt.c  11
36 files changed, 189 insertions, 58 deletions
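
The assertion and unlock helpers used throughout this import (WT_ASSERT_SPINLOCK_OWNED, __wt_spin_locked, __wt_spin_owned, __wt_spin_unlock_if_owned) are defined elsewhere in the WiredTiger tree and are not part of this diff. Purely as a hedged illustration of the idea, a minimal sketch of ownership-tracking spinlock helpers might look like the following; every type, field, and function name here is an assumption made for the example, not WiredTiger's actual implementation.

/* Illustrative sketch only; not the actual WiredTiger implementation. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct sketch_session SKETCH_SESSION; /* stand-in for WT_SESSION_IMPL */

typedef struct {
    bool locked;           /* assumed: true while the lock is held */
    SKETCH_SESSION *owner; /* assumed: session that acquired the lock, NULL if none recorded */
} SKETCH_SPINLOCK;

/* True if any session holds the lock (roughly what __wt_spin_locked could report). */
static inline bool
sketch_spin_locked(SKETCH_SESSION *session, SKETCH_SPINLOCK *t)
{
    (void)session;
    return (t->locked);
}

/* True if the given session is the recorded owner (roughly what __wt_spin_owned could report). */
static inline bool
sketch_spin_owned(SKETCH_SESSION *session, SKETCH_SPINLOCK *t)
{
    return (t->locked && t->owner == session);
}

/* Acquire and record ownership (stand-in for __wt_spin_lock). */
static inline void
sketch_spin_lock(SKETCH_SESSION *session, SKETCH_SPINLOCK *t)
{
    /* Real code would block on the underlying OS lock here. */
    t->locked = true;
    t->owner = session;
}

/* Release and clear ownership (stand-in for __wt_spin_unlock). */
static inline void
sketch_spin_unlock(SKETCH_SESSION *session, SKETCH_SPINLOCK *t)
{
    (void)session;
    t->owner = NULL;
    t->locked = false;
}

/* Unlock only when this session owns the lock; handy on shared error paths. */
static inline void
sketch_spin_unlock_if_owned(SKETCH_SESSION *session, SKETCH_SPINLOCK *t)
{
    if (sketch_spin_owned(session, t))
        sketch_spin_unlock(session, t);
}

/* Diagnostic assertion corresponding in spirit to WT_ASSERT_SPINLOCK_OWNED. */
#define SKETCH_ASSERT_SPINLOCK_OWNED(session, lock) assert(sketch_spin_owned((session), (lock)))
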
diff --git a/src/third_party/wiredtiger/dist/s_funcs.list b/src/third_party/wiredtiger/dist/s_funcs.list
index 7ac890052d6..1b831580e27 100644
--- a/src/third_party/wiredtiger/dist/s_funcs.list
+++ b/src/third_party/wiredtiger/dist/s_funcs.list
@@ -31,8 +31,6 @@ __wt_nlpo2
__wt_nlpo2_round
__wt_print_huffman_code
__wt_session_breakpoint
-__wt_spin_locked
-__wt_spin_unlock_if_owned
__wt_stat_join_aggregate
__wt_stat_join_clear_all
__wt_strcat
diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data
index 426b41c6c48..76fb2781f75 100644
--- a/src/third_party/wiredtiger/import.data
+++ b/src/third_party/wiredtiger/import.data
@@ -2,5 +2,5 @@
"vendor": "wiredtiger",
"github": "wiredtiger/wiredtiger.git",
"branch": "mongodb-master",
- "commit": "d310d6330ea5b23835d9eb231f65bb9f91bdef21"
+ "commit": "04dcc362042b8cc35fdd8de8343be3a15d4fa47c"
}
diff --git a/src/third_party/wiredtiger/src/block/block_ckpt.c b/src/third_party/wiredtiger/src/block/block_ckpt.c
index 5aa945b9350..b734227647f 100644
--- a/src/third_party/wiredtiger/src/block/block_ckpt.c
+++ b/src/third_party/wiredtiger/src/block/block_ckpt.c
@@ -218,6 +218,11 @@ __wt_block_checkpoint_unload(WT_SESSION_IMPL *session, WT_BLOCK *block, bool che
void
__wt_block_ckpt_destroy(WT_SESSION_IMPL *session, WT_BLOCK_CKPT *ci)
{
+ /*
+ * We should hold the live lock here when running on the live checkpoint. But there is no easy
+ * way to determine if the checkpoint is live so we cannot assert the locking here.
+ */
+
/* Discard the extent lists. */
__wt_block_extlist_free(session, &ci->alloc);
__wt_block_extlist_free(session, &ci->avail);
@@ -458,6 +463,9 @@ __ckpt_add_blk_mods_alloc(
WT_EXT *ext;
u_int i;
+ if (&block->live == ci)
+ WT_ASSERT_SPINLOCK_OWNED(session, &block->live_lock);
+
WT_CKPT_FOREACH (ckptbase, ckpt) {
if (F_ISSET(ckpt, WT_CKPT_ADD))
break;
@@ -526,10 +534,10 @@ __ckpt_process(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_CKPT *ckptbase)
WT_CKPT *ckpt, *next_ckpt;
WT_DECL_RET;
uint64_t ckpt_size;
- bool deleting, fatal, local, locked;
+ bool deleting, fatal, local;
ci = &block->live;
- fatal = locked = false;
+ fatal = false;
if (EXTRA_DIAGNOSTICS_ENABLED(session, WT_DIAGNOSTIC_CHECKPOINT_VALIDATE))
WT_RET(__ckpt_verify(session, ckptbase));
@@ -647,7 +655,6 @@ __ckpt_process(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_CKPT *ckptbase)
* ranges into the live tree.
*/
__wt_spin_lock(session, &block->live_lock);
- locked = true;
/*
* We've allocated our last page, update the checkpoint size. We need to calculate the live
@@ -828,8 +835,7 @@ err:
__wt_blkcache_set_readonly(session);
}
- if (locked)
- __wt_spin_unlock(session, &block->live_lock);
+ __wt_spin_unlock_if_owned(session, &block->live_lock);
/* Discard any checkpoint information we loaded. */
WT_CKPT_FOREACH (ckptbase, ckpt)
@@ -853,6 +859,8 @@ __ckpt_update(
bool is_live;
is_live = F_ISSET(ckpt, WT_CKPT_ADD);
+ if (is_live)
+ WT_ASSERT_SPINLOCK_OWNED(session, &block->live_lock);
#ifdef HAVE_DIAGNOSTIC
/* Check the extent list combinations for overlaps. */
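
A second theme of this change, visible in the __ckpt_process hunks above and repeated in conn_log.c, log.c, optrack.c, and schema_drop.c below, is replacing hand-maintained `locked` booleans on error paths with __wt_spin_unlock_if_owned. Reusing the sketch helpers from the snippet above (still an illustration, with sketch_do_work as a placeholder, not real WiredTiger code), the before/after shape is roughly:

/* Placeholder for whatever work runs while the lock is held. */
static int
sketch_do_work(SKETCH_SESSION *session)
{
    (void)session;
    return (0);
}

/* Before: the caller tracks lock state by hand so the error path knows what to release. */
static int
sketch_op_with_flag(SKETCH_SESSION *session, SKETCH_SPINLOCK *lock)
{
    int ret;
    bool locked;

    locked = false;
    sketch_spin_lock(session, lock);
    locked = true;
    if ((ret = sketch_do_work(session)) != 0)
        goto err;
    locked = false;
    sketch_spin_unlock(session, lock);

err:
    if (locked)
        sketch_spin_unlock(session, lock);
    return (ret);
}

/* After: the owner-aware unlock makes the bookkeeping flag unnecessary. */
static int
sketch_op_if_owned(SKETCH_SESSION *session, SKETCH_SPINLOCK *lock)
{
    int ret;

    sketch_spin_lock(session, lock);
    if ((ret = sketch_do_work(session)) != 0)
        goto err;
    sketch_spin_unlock(session, lock);

err:
    sketch_spin_unlock_if_owned(session, lock);
    return (ret);
}
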
diff --git a/src/third_party/wiredtiger/src/block/block_compact.c b/src/third_party/wiredtiger/src/block/block_compact.c
index fcdd3cf7bab..1e5edf6a2dc 100644
--- a/src/third_party/wiredtiger/src/block/block_compact.c
+++ b/src/third_party/wiredtiger/src/block/block_compact.c
@@ -123,7 +123,7 @@ __block_compact_skip_internal(WT_SESSION_IMPL *session, WT_BLOCK *block, bool es
WT_EXT *ext;
wt_off_t avail_eighty, avail_ninety, off, size, eighty, ninety;
- /* IMPORTANT: We assume here that block->live_lock is locked. */
+ WT_ASSERT_SPINLOCK_OWNED(session, &block->live_lock);
/* Sum the available bytes in the initial 80% and 90% of the file. */
avail_eighty = avail_ninety = avail_bytes_before_start_offset;
@@ -654,6 +654,8 @@ __block_dump_file_stat(WT_SESSION_IMPL *session, WT_BLOCK *block, bool start)
uintmax_t bucket_size;
u_int i;
+ WT_ASSERT_SPINLOCK_OWNED(session, &block->live_lock);
+
el = &block->live.avail;
size = block->size;
diff --git a/src/third_party/wiredtiger/src/block/block_ext.c b/src/third_party/wiredtiger/src/block/block_ext.c
index 618add993cf..f7917a75845 100644
--- a/src/third_party/wiredtiger/src/block/block_ext.c
+++ b/src/third_party/wiredtiger/src/block/block_ext.c
@@ -503,6 +503,9 @@ __wt_block_alloc(WT_SESSION_IMPL *session, WT_BLOCK *block, wt_off_t *offp, wt_o
WT_EXT *ext, **estack[WT_SKIP_MAXDEPTH];
WT_SIZE *szp, **sstack[WT_SKIP_MAXDEPTH];
+ /* The live lock must be locked. */
+ WT_ASSERT_SPINLOCK_OWNED(session, &block->live_lock);
+
/* If a sync is running, no other sessions can allocate blocks. */
WT_ASSERT(session, WT_SESSION_BTREE_SYNC_SAFE(session, S2BT(session)));
@@ -628,6 +631,10 @@ __wt_block_off_free(
{
WT_DECL_RET;
+ /* The live lock must be locked, except for when we are running salvage. */
+ if (!F_ISSET(S2BT(session), WT_BTREE_SALVAGE))
+ WT_ASSERT_SPINLOCK_OWNED(session, &block->live_lock);
+
/* If a sync is running, no other sessions can free blocks. */
WT_ASSERT(session, WT_SESSION_BTREE_SYNC_SAFE(session, S2BT(session)));
@@ -695,6 +702,8 @@ __wt_block_extlist_overlap(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_BLOCK_C
{
WT_EXT *alloc, *discard;
+ WT_ASSERT_SPINLOCK_OWNED(session, &block->live_lock);
+
alloc = ci->alloc.off[0];
discard = ci->discard.off[0];
@@ -730,6 +739,8 @@ __block_ext_overlap(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_EXTLIST *ael,
WT_EXTLIST *avail, *el;
wt_off_t off, size;
+ WT_ASSERT_SPINLOCK_OWNED(session, &block->live_lock);
+
avail = &block->live.ckpt_avail;
/*
@@ -904,6 +915,11 @@ __wt_block_extlist_merge(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_EXTLIST *
WT_EXTLIST tmp;
u_int i;
+ /*
+ * We should hold the live lock here when running on the live checkpoint. But there is no easy
+ * way to determine if the checkpoint is live so we cannot assert the locking here.
+ */
+
__wt_verbose_debug2(session, WT_VERB_BLOCK, "merging %s into %s", a->name, b->name);
/*
diff --git a/src/third_party/wiredtiger/src/block/block_write.c b/src/third_party/wiredtiger/src/block/block_write.c
index 75ddd53001b..b2ba9a35ee3 100644
--- a/src/third_party/wiredtiger/src/block/block_write.c
+++ b/src/third_party/wiredtiger/src/block/block_write.c
@@ -83,16 +83,18 @@ __wt_block_discard(WT_SESSION_IMPL *session, WT_BLOCK *block, size_t added_size)
}
/*
- * __wt_block_extend --
+ * __block_extend --
* Extend the file.
*/
static inline int
-__wt_block_extend(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_FH *fh, wt_off_t offset,
+__block_extend(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_FH *fh, wt_off_t offset,
size_t align_size, bool *release_lockp)
{
WT_DECL_RET;
WT_FILE_HANDLE *handle;
+ WT_ASSERT_SPINLOCK_OWNED(session, &block->live_lock);
+
/*
* The locking in this function is messy: by definition, the live system is locked when we're
* called, but that lock may have been acquired by our caller or our caller's caller. If our
@@ -272,7 +274,7 @@ __block_write_off(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_ITEM *buf, uint3
}
ret = __wt_block_alloc(session, block, &offset, (wt_off_t)align_size);
if (ret == 0)
- ret = __wt_block_extend(session, block, fh, offset, align_size, &local_locked);
+ ret = __block_extend(session, block, fh, offset, align_size, &local_locked);
if (local_locked)
__wt_spin_unlock(session, &block->live_lock);
WT_RET(ret);
diff --git a/src/third_party/wiredtiger/src/block_cache/block_chunkcache.c b/src/third_party/wiredtiger/src/block_cache/block_chunkcache.c
index a09aa682369..5f3d615071d 100644
--- a/src/third_party/wiredtiger/src/block_cache/block_chunkcache.c
+++ b/src/third_party/wiredtiger/src/block_cache/block_chunkcache.c
@@ -121,8 +121,7 @@ __chunkcache_alloc_chunk(WT_SESSION_IMPL *session, wt_off_t offset, WT_BLOCK *bl
hash = __wt_hash_city64((void *)hash_id, sizeof(WT_CHUNKCACHE_HASHID));
(*newchunk)->bucket_id = hash % chunkcache->hashtable_size;
- WT_ASSERT(
- session, __wt_spin_trylock(session, WT_BUCKET_LOCK(chunkcache, (*newchunk)->bucket_id)) != 0);
+ WT_ASSERT_SPINLOCK_OWNED(session, WT_BUCKET_LOCK(chunkcache, (*newchunk)->bucket_id));
if ((ret = __chunkcache_alloc(session, *newchunk)) != 0) {
__wt_free(session, *newchunk);
@@ -409,6 +408,8 @@ __wt_chunkcache_remove(
uint64_t bucket_id;
bool done;
+ WT_ASSERT_SPINLOCK_OWNED(session, &block->live_lock);
+
chunkcache = &S2C(session)->chunkcache;
already_removed = 0;
remains_to_remove = size;
diff --git a/src/third_party/wiredtiger/src/btree/bt_compact.c b/src/third_party/wiredtiger/src/btree/bt_compact.c
index 894e49c45b4..6e5fcc78353 100644
--- a/src/third_party/wiredtiger/src/btree/bt_compact.c
+++ b/src/third_party/wiredtiger/src/btree/bt_compact.c
@@ -21,6 +21,8 @@ __compact_page_inmem_check_addrs(WT_SESSION_IMPL *session, WT_REF *ref, bool *sk
WT_PAGE_MODIFY *mod;
uint32_t i;
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2BT(session)->flush_lock);
+
*skipp = true; /* Default skip. */
bm = S2BT(session)->bm;
@@ -61,6 +63,8 @@ __compact_page_inmem(WT_SESSION_IMPL *session, WT_REF *ref, bool *skipp)
{
*skipp = true; /* Default skip. */
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2BT(session)->flush_lock);
+
/*
* Ignore dirty pages, checkpoint will likely write them. There are cases where checkpoint can
* skip dirty pages: to avoid that, we could alter the transactional information of the page,
@@ -99,6 +103,8 @@ __compact_page_replace_addr(WT_SESSION_IMPL *session, WT_REF *ref, WT_ADDR_COPY
WT_CELL_UNPACK_ADDR unpack;
WT_DECL_RET;
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2BT(session)->flush_lock);
+
/*
* If there's no address at all (the page has never been written), allocate a new WT_ADDR
* structure, otherwise, the address has already been instantiated, replace the cookie.
@@ -161,6 +167,8 @@ __compact_page(WT_SESSION_IMPL *session, WT_REF *ref, bool *skipp)
*skipp = true; /* Default skip. */
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2BT(session)->flush_lock);
+
/* Lock the WT_REF. */
WT_REF_LOCK(session, ref, &previous_state);
diff --git a/src/third_party/wiredtiger/src/btree/bt_sync.c b/src/third_party/wiredtiger/src/btree/bt_sync.c
index a7d999a766b..f052547b4a1 100644
--- a/src/third_party/wiredtiger/src/btree/bt_sync.c
+++ b/src/third_party/wiredtiger/src/btree/bt_sync.c
@@ -20,6 +20,8 @@ __sync_checkpoint_can_skip(WT_SESSION_IMPL *session, WT_REF *ref)
WT_TXN *txn;
u_int i;
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2BT(session)->flush_lock);
+
mod = ref->page->modify;
txn = session->txn;
diff --git a/src/third_party/wiredtiger/src/btree/bt_sync_obsolete.c b/src/third_party/wiredtiger/src/btree/bt_sync_obsolete.c
index 3f51e3783a4..959ec9b2285 100644
--- a/src/third_party/wiredtiger/src/btree/bt_sync_obsolete.c
+++ b/src/third_party/wiredtiger/src/btree/bt_sync_obsolete.c
@@ -273,6 +273,7 @@ __wt_sync_obsolete_cleanup(WT_SESSION_IMPL *session, WT_REF *parent)
WT_ASSERT_ALWAYS(session, WT_PAGE_IS_INTERNAL(parent->page),
"Checkpoint obsolete cleanup requires an internal page");
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2BT(session)->flush_lock);
__wt_verbose_debug2(session, WT_VERB_CHECKPOINT_CLEANUP,
"%p: traversing the internal page %p for obsolete child pages", (void *)parent,
diff --git a/src/third_party/wiredtiger/src/btree/bt_upgrade.c b/src/third_party/wiredtiger/src/btree/bt_upgrade.c
index de9a6bf7574..1e9b8d51d49 100644
--- a/src/third_party/wiredtiger/src/btree/bt_upgrade.c
+++ b/src/third_party/wiredtiger/src/btree/bt_upgrade.c
@@ -17,6 +17,9 @@ __wt_upgrade(WT_SESSION_IMPL *session, const char *cfg[])
{
WT_UNUSED(cfg);
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->checkpoint_lock);
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
/* There's nothing to upgrade, yet. */
WT_RET(__wt_progress(session, NULL, 1));
return (0);
diff --git a/src/third_party/wiredtiger/src/btree/bt_vrfy.c b/src/third_party/wiredtiger/src/btree/bt_vrfy.c
index 938d8bea88c..d2d95584c73 100644
--- a/src/third_party/wiredtiger/src/btree/bt_vrfy.c
+++ b/src/third_party/wiredtiger/src/btree/bt_vrfy.c
@@ -189,6 +189,9 @@ __wt_verify(WT_SESSION_IMPL *session, const char *cfg[])
const char *name;
bool bm_start, quit, skip_hs;
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->checkpoint_lock);
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
btree = S2BT(session);
bm = btree->bm;
ckptbase = NULL;
diff --git a/src/third_party/wiredtiger/src/conn/conn_cache_pool.c b/src/third_party/wiredtiger/src/conn/conn_cache_pool.c
index 5e8b2237d30..878dbd43c50 100644
--- a/src/third_party/wiredtiger/src/conn/conn_cache_pool.c
+++ b/src/third_party/wiredtiger/src/conn/conn_cache_pool.c
@@ -50,10 +50,10 @@ __wt_cache_pool_config(WT_SESSION_IMPL *session, const char **cfg)
WT_DECL_RET;
uint64_t chunk, quota, reserve, size, used_cache;
char *pool_name;
- bool cp_locked, created, updating;
+ bool cp_lock_available, created, updating;
conn = S2C(session);
- cp_locked = created = updating = false;
+ cp_lock_available = created = updating = false;
pool_name = NULL;
cp = NULL;
@@ -103,13 +103,18 @@ __wt_cache_pool_config(WT_SESSION_IMPL *session, const char **cfg)
session, WT_ERROR, "Attempting to join a cache pool that does not exist: %s", pool_name);
/*
+ * The cache pool lock now exists for sure, and we may now lock it. Remember this so that we can
+ * use __wt_spin_unlock_if_owned at the end of the function.
+ */
+ cp_lock_available = true;
+
+ /*
* At this point we have a cache pool to use. We need to take its lock. We need to drop the
* process lock first to avoid deadlock and acquire in the proper order.
*/
__wt_spin_unlock(session, &__wt_process.spinlock);
cp = __wt_process.cache_pool;
__wt_spin_lock(session, &cp->cache_pool_lock);
- cp_locked = true;
__wt_spin_lock(session, &__wt_process.spinlock);
/*
@@ -194,7 +199,6 @@ __wt_cache_pool_config(WT_SESSION_IMPL *session, const char **cfg)
conn->cache->cp_reserved = reserve;
conn->cache->cp_quota = quota;
__wt_spin_unlock(session, &cp->cache_pool_lock);
- cp_locked = false;
/* Wake up the cache pool server so any changes are noticed. */
if (updating)
@@ -207,8 +211,9 @@ __wt_cache_pool_config(WT_SESSION_IMPL *session, const char **cfg)
F_SET(conn, WT_CONN_CACHE_POOL);
err:
__wt_spin_unlock(session, &__wt_process.spinlock);
- if (cp_locked)
- __wt_spin_unlock(session, &cp->cache_pool_lock);
+
+ if (cp_lock_available)
+ __wt_spin_unlock_if_owned(session, &cp->cache_pool_lock);
__wt_free(session, pool_name);
if (ret != 0 && created) {
__wt_free(session, cp->name);
@@ -281,11 +286,11 @@ __wt_conn_cache_pool_destroy(WT_SESSION_IMPL *session)
WT_CACHE_POOL *cp;
WT_CONNECTION_IMPL *conn, *entry;
WT_DECL_RET;
- bool cp_locked, found;
+ bool cp_lock_available, found;
conn = S2C(session);
cache = conn->cache;
- WT_NOT_READ(cp_locked, false);
+ cp_lock_available = true;
found = false;
cp = __wt_process.cache_pool;
@@ -294,7 +299,6 @@ __wt_conn_cache_pool_destroy(WT_SESSION_IMPL *session)
F_CLR(conn, WT_CONN_CACHE_POOL);
__wt_spin_lock(session, &cp->cache_pool_lock);
- cp_locked = true;
TAILQ_FOREACH (entry, &cp->cache_pool_qh, cpq)
if (entry == conn) {
found = true;
@@ -318,7 +322,6 @@ __wt_conn_cache_pool_destroy(WT_SESSION_IMPL *session)
* it to complete any balance operation.
*/
__wt_spin_unlock(session, &cp->cache_pool_lock);
- WT_NOT_READ(cp_locked, false);
FLD_CLR_ATOMIC_16(cache->pool_flags_atomic, WT_CACHE_POOL_RUN);
__wt_cond_signal(session, cp->cache_pool_cond);
@@ -331,7 +334,6 @@ __wt_conn_cache_pool_destroy(WT_SESSION_IMPL *session)
* whether we were the last participant.
*/
__wt_spin_lock(session, &cp->cache_pool_lock);
- cp_locked = true;
}
/*
@@ -339,8 +341,7 @@ __wt_conn_cache_pool_destroy(WT_SESSION_IMPL *session)
* nothing further to do.
*/
if (cp->refs < 1) {
- if (cp_locked)
- __wt_spin_unlock(session, &cp->cache_pool_lock);
+ __wt_spin_unlock_if_owned(session, &cp->cache_pool_lock);
return (0);
}
@@ -359,17 +360,17 @@ __wt_conn_cache_pool_destroy(WT_SESSION_IMPL *session)
__wt_process.cache_pool = NULL;
__wt_spin_unlock(session, &__wt_process.spinlock);
__wt_spin_unlock(session, &cp->cache_pool_lock);
- cp_locked = false;
/* Now free the pool. */
__wt_free(session, cp->name);
+ cp_lock_available = false;
__wt_spin_destroy(session, &cp->cache_pool_lock);
__wt_cond_destroy(session, &cp->cache_pool_cond);
__wt_free(session, cp);
}
- if (cp_locked) {
+ if (cp_lock_available && __wt_spin_owned(session, &cp->cache_pool_lock)) {
__wt_spin_unlock(session, &cp->cache_pool_lock);
/* Notify other participants if we were managing */
@@ -546,6 +547,9 @@ __cache_pool_adjust(WT_SESSION_IMPL *session, uint64_t highest, uint64_t bump_th
/* Highest as a percentage, avoid 0 */
highest_percentile = (highest / 100) + 1;
+ /* The cache pool lock is locked using the NULL session. */
+ WT_ASSERT_SPINLOCK_OWNED(NULL, &cp->cache_pool_lock);
+
if (WT_VERBOSE_ISSET(session, WT_VERB_SHARED_CACHE)) {
__wt_verbose(session, WT_VERB_SHARED_CACHE, "%s", "Cache pool distribution: ");
__wt_verbose(session, WT_VERB_SHARED_CACHE, "%s",
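
One subtle case above: __cache_pool_adjust asserts ownership with a NULL session because, as the new comment notes, the cache pool lock is locked using the NULL session. Under the sketch model from the first snippet (an assumption about the semantics, not taken from WiredTiger source), that assertion amounts to checking that the lock is held with no owning session recorded:

/* Hedged illustration only, reusing the sketch types defined earlier. */
static void
sketch_pool_adjust_example(SKETCH_SPINLOCK *cache_pool_lock)
{
    /* The pool server took the lock with no session, i.e. a NULL owner in the sketch. */
    sketch_spin_lock(NULL, cache_pool_lock);

    /* Equivalent in spirit to WT_ASSERT_SPINLOCK_OWNED(NULL, &cp->cache_pool_lock). */
    SKETCH_ASSERT_SPINLOCK_OWNED(NULL, cache_pool_lock);

    sketch_spin_unlock(NULL, cache_pool_lock);
}
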
diff --git a/src/third_party/wiredtiger/src/conn/conn_log.c b/src/third_party/wiredtiger/src/conn/conn_log.c
index 68346a0fe2d..20dcc338abe 100644
--- a/src/third_party/wiredtiger/src/conn/conn_log.c
+++ b/src/third_party/wiredtiger/src/conn/conn_log.c
@@ -567,12 +567,10 @@ __log_file_server(void *arg)
WT_LSN close_end_lsn;
WT_SESSION_IMPL *session;
uint32_t filenum;
- bool locked;
session = arg;
conn = S2C(session);
log = conn->log;
- locked = false;
while (FLD_ISSET(conn->server_flags, WT_CONN_SERVER_LOG)) {
/*
* If there is a log file to close, make sure any outstanding write operations have
@@ -615,12 +613,10 @@ __log_file_server(void *arg)
}
WT_SET_LSN(&close_end_lsn, close_end_lsn.l.file + 1, 0);
__wt_spin_lock(session, &log->log_sync_lock);
- locked = true;
WT_ERR(__wt_close(session, &close_fh));
WT_ASSERT(session, __wt_log_cmp(&close_end_lsn, &log->sync_lsn) >= 0);
WT_ASSIGN_LSN(&log->sync_lsn, &close_end_lsn);
__wt_cond_signal(session, log->log_sync_cond);
- locked = false;
__wt_spin_unlock(session, &log->log_sync_lock);
}
}
@@ -633,8 +629,7 @@ __log_file_server(void *arg)
err:
WT_IGNORE_RET(__wt_panic(session, ret, "log close server error"));
}
- if (locked)
- __wt_spin_unlock(session, &log->log_sync_lock);
+ __wt_spin_unlock_if_owned(session, &log->log_sync_lock);
return (WT_THREAD_RET_VALUE);
}
diff --git a/src/third_party/wiredtiger/src/conn/conn_tiered.c b/src/third_party/wiredtiger/src/conn/conn_tiered.c
index 122e0583e4d..7e7668656f5 100644
--- a/src/third_party/wiredtiger/src/conn/conn_tiered.c
+++ b/src/third_party/wiredtiger/src/conn/conn_tiered.c
@@ -112,6 +112,9 @@ __tier_flush_meta(
WT_RET(__wt_scr_alloc(session, 512, &buf));
dhandle = &tiered->iface;
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->checkpoint_lock);
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->schema_lock);
+
newconfig = obj_value = NULL;
WT_ERR(__wt_meta_track_on(session));
tracking = true;
diff --git a/src/third_party/wiredtiger/src/cursor/cur_backup.c b/src/third_party/wiredtiger/src/cursor/cur_backup.c
index 43c6436b993..ee1bf714570 100644
--- a/src/third_party/wiredtiger/src/cursor/cur_backup.c
+++ b/src/third_party/wiredtiger/src/cursor/cur_backup.c
@@ -475,6 +475,9 @@ __backup_config(WT_SESSION_IMPL *session, WT_CURSOR_BACKUP *cb, const char *cfg[
incremental_config = log_config = false;
is_dup = othercb != NULL;
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->checkpoint_lock);
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->schema_lock);
+
/*
* Per-file offset incremental hot backup configurations take a starting checkpoint and optional
* maximum transfer size, and the subsequent duplicate cursors take a file object.
@@ -680,6 +683,9 @@ __backup_start(
dest = NULL;
is_dup = othercb != NULL;
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->checkpoint_lock);
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->schema_lock);
+
cb->next = 0;
cb->list = NULL;
cb->list_next = 0;
diff --git a/src/third_party/wiredtiger/src/evict/evict_lru.c b/src/third_party/wiredtiger/src/evict/evict_lru.c
index 380e1a642d2..d4cb8a15d72 100644
--- a/src/third_party/wiredtiger/src/evict/evict_lru.c
+++ b/src/third_party/wiredtiger/src/evict/evict_lru.c
@@ -172,6 +172,8 @@ __evict_list_clear_page_locked(WT_SESSION_IMPL *session, WT_REF *ref, bool exclu
cache = S2C(session)->cache;
found = false;
+ WT_ASSERT_SPINLOCK_OWNED(session, &cache->evict_queue_lock);
+
for (q = 0; q < last_queue_idx && !found; q++) {
__wt_spin_lock(session, &cache->evict_queues[q].evict_lock);
elem = cache->evict_queues[q].evict_max;
@@ -406,6 +408,8 @@ __evict_server(WT_SESSION_IMPL *session, bool *did_work)
conn = S2C(session);
cache = conn->cache;
+ WT_ASSERT_SPINLOCK_OWNED(session, &cache->evict_pass_lock);
+
/* Evict pages from the cache as needed. */
WT_RET(__evict_pass(session));
@@ -1756,6 +1760,8 @@ __evict_walk_tree(WT_SESSION_IMPL *session, WT_EVICT_QUEUE *queue, u_int max_ent
restarts = 0;
give_up = urgent_queued = false;
+ WT_ASSERT_SPINLOCK_OWNED(session, &cache->evict_walk_lock);
+
/*
* Figure out how many slots to fill from this tree. Note that some care is taken in the
* calculation to avoid overflow.
diff --git a/src/third_party/wiredtiger/src/log/log.c b/src/third_party/wiredtiger/src/log/log.c
index bf05cee1ab9..9e811a13585 100644
--- a/src/third_party/wiredtiger/src/log/log.c
+++ b/src/third_party/wiredtiger/src/log/log.c
@@ -230,6 +230,7 @@ __log_fsync_dir(WT_SESSION_IMPL *session, WT_LSN *min_lsn, const char *method)
uint64_t fsync_duration_usecs, time_start, time_stop;
log = S2C(session)->log;
+ WT_ASSERT_SPINLOCK_OWNED(session, &log->log_sync_lock);
if (log->sync_dir_lsn.l.file < min_lsn->l.file) {
WT_ASSERT(session, log->log_dir_fh != NULL);
@@ -263,6 +264,7 @@ __log_fsync_file(WT_SESSION_IMPL *session, WT_LSN *min_lsn, const char *method,
log = S2C(session)->log;
log_fh = NULL;
+ WT_ASSERT_SPINLOCK_OWNED(session, &log->log_sync_lock);
if (__wt_log_cmp(&log->sync_lsn, min_lsn) < 0) {
/*
@@ -1080,12 +1082,10 @@ __log_alloc_prealloc(WT_SESSION_IMPL *session, uint32_t to_num)
uint32_t from_num;
u_int logcount;
char **logfiles;
- bool locked;
conn = S2C(session);
log = conn->log;
logfiles = NULL;
- locked = false;
/*
* If there are no pre-allocated files, return WT_NOTFOUND.
@@ -1102,7 +1102,6 @@ __log_alloc_prealloc(WT_SESSION_IMPL *session, uint32_t to_num)
WT_ERR(__wt_log_filename(session, from_num, WT_LOG_PREPNAME, from_path));
WT_ERR(__wt_log_filename(session, to_num, WT_LOG_FILENAME, to_path));
__wt_spin_lock(session, &log->log_fs_lock);
- locked = true;
__wt_verbose(session, WT_VERB_LOG, "log_alloc_prealloc: rename log %s to %s",
(const char *)from_path->data, (const char *)to_path->data);
WT_STAT_CONN_INCR(session, log_prealloc_used);
@@ -1115,8 +1114,7 @@ __log_alloc_prealloc(WT_SESSION_IMPL *session, uint32_t to_num)
err:
__wt_scr_free(session, &from_path);
__wt_scr_free(session, &to_path);
- if (locked)
- __wt_spin_unlock(session, &log->log_fs_lock);
+ __wt_spin_unlock_if_owned(session, &log->log_fs_lock);
WT_TRET(__wt_fs_directory_list_free(session, &logfiles, logcount));
return (ret);
}
@@ -1893,11 +1891,9 @@ __wt_log_release(WT_SESSION_IMPL *session, WT_LOGSLOT *slot, bool *freep)
WT_LOG *log;
WT_LSN sync_lsn;
int64_t release_buffered, release_bytes;
- bool locked;
conn = S2C(session);
log = conn->log;
- locked = false;
if (freep != NULL)
*freep = 1;
release_buffered = WT_LOG_SLOT_RELEASED_BUFFERED(slot->slot_state);
@@ -1983,7 +1979,6 @@ __wt_log_release(WT_SESSION_IMPL *session, WT_LOGSLOT *slot, bool *freep)
__wt_cond_wait(session, log->log_sync_cond, 10 * WT_THOUSAND, NULL);
continue;
}
- locked = true;
/*
* Record the current end of our update after the lock. That is how far our calls can
@@ -2007,12 +2002,10 @@ __wt_log_release(WT_SESSION_IMPL *session, WT_LOGSLOT *slot, bool *freep)
* Clear the flags before leaving the loop.
*/
F_CLR_ATOMIC_16(slot, WT_SLOT_SYNC | WT_SLOT_SYNC_DIR);
- locked = false;
__wt_spin_unlock(session, &log->log_sync_lock);
}
err:
- if (locked)
- __wt_spin_unlock(session, &log->log_sync_lock);
+ __wt_spin_unlock_if_owned(session, &log->log_sync_lock);
if (ret != 0 && slot->slot_error == 0)
slot->slot_error = ret;
return (ret);
diff --git a/src/third_party/wiredtiger/src/lsm/lsm_cursor.c b/src/third_party/wiredtiger/src/lsm/lsm_cursor.c
index e2a2a6888e7..8c441fdca1e 100644
--- a/src/third_party/wiredtiger/src/lsm/lsm_cursor.c
+++ b/src/third_party/wiredtiger/src/lsm/lsm_cursor.c
@@ -436,6 +436,8 @@ __clsm_open_cursors(WT_CURSOR_LSM *clsm, bool update, u_int start_chunk, uint32_
locked = false;
lsm_tree = clsm->lsm_tree;
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
/*
* Ensure that any snapshot update has cursors on the right set of chunks to guarantee
* visibility is correct.
diff --git a/src/third_party/wiredtiger/src/lsm/lsm_stat.c b/src/third_party/wiredtiger/src/lsm/lsm_stat.c
index 80924f8d6b1..7527fea15e2 100644
--- a/src/third_party/wiredtiger/src/lsm/lsm_stat.c
+++ b/src/third_party/wiredtiger/src/lsm/lsm_stat.c
@@ -29,6 +29,8 @@ __curstat_lsm_init(WT_SESSION_IMPL *session, const char *uri, WT_CURSOR_STAT *cs
WT_CONFIG_BASE(session, WT_SESSION_open_cursor), "checkpoint=" WT_CHECKPOINT, NULL, NULL};
bool locked;
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
locked = false;
WT_RET(__wt_lsm_tree_get(session, uri, false, &lsm_tree));
WT_ERR(__wt_scr_alloc(session, 0, &uribuf));
diff --git a/src/third_party/wiredtiger/src/lsm/lsm_tree.c b/src/third_party/wiredtiger/src/lsm/lsm_tree.c
index 5cdb060aa1d..398f1cfbc20 100644
--- a/src/third_party/wiredtiger/src/lsm/lsm_tree.c
+++ b/src/third_party/wiredtiger/src/lsm/lsm_tree.c
@@ -284,6 +284,8 @@ int
__wt_lsm_tree_setup_chunk(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree, WT_LSM_CHUNK *chunk)
{
WT_ASSERT(session, FLD_ISSET(session->lock_flags, WT_SESSION_LOCKED_SCHEMA));
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
__wt_epoch(session, &chunk->create_time);
WT_RET(__wt_spin_init(session, &chunk->timestamp_spinlock, "LSM chunk timestamp"));
@@ -705,6 +707,8 @@ __wt_lsm_tree_switch(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
uint32_t chunks_moved, nchunks, new_id;
bool first_switch;
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
__wt_lsm_tree_writelock(session, lsm_tree);
nchunks = lsm_tree->nchunks;
@@ -950,6 +954,9 @@ __wt_lsm_tree_truncate(WT_SESSION_IMPL *session, const char *name, const char *c
WT_UNUSED(cfg);
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->checkpoint_lock);
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
chunk = NULL;
WT_NOT_READ(locked, false);
diff --git a/src/third_party/wiredtiger/src/lsm/lsm_work_unit.c b/src/third_party/wiredtiger/src/lsm/lsm_work_unit.c
index 4748e559575..eaaf452cc53 100644
--- a/src/third_party/wiredtiger/src/lsm/lsm_work_unit.c
+++ b/src/third_party/wiredtiger/src/lsm/lsm_work_unit.c
@@ -325,6 +325,9 @@ __lsm_checkpoint_chunk(WT_SESSION_IMPL *session)
{
WT_DECL_RET;
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->checkpoint_lock);
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
/*
* Turn on metadata tracking to ensure the checkpoint gets the necessary handle locks.
*/
diff --git a/src/third_party/wiredtiger/src/meta/meta_turtle.c b/src/third_party/wiredtiger/src/meta/meta_turtle.c
index 6fb19c3bbe4..4044c6a6a05 100644
--- a/src/third_party/wiredtiger/src/meta/meta_turtle.c
+++ b/src/third_party/wiredtiger/src/meta/meta_turtle.c
@@ -623,6 +623,7 @@ __wt_turtle_read(WT_SESSION_IMPL *session, const char *key, char **valuep)
/* Require single-threading. */
WT_ASSERT(session, FLD_ISSET(session->lock_flags, WT_SESSION_LOCKED_TURTLE));
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->turtle_lock);
/*
* Open the turtle file; there's one case where we won't find the turtle file, yet still
@@ -688,6 +689,7 @@ __wt_turtle_update(WT_SESSION_IMPL *session, const char *key, const char *value)
/* Require single-threading. */
WT_ASSERT(session, FLD_ISSET(session->lock_flags, WT_SESSION_LOCKED_TURTLE));
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->turtle_lock);
/*
* Create the turtle setup file: we currently re-write it from scratch every time.
diff --git a/src/third_party/wiredtiger/src/optrack/optrack.c b/src/third_party/wiredtiger/src/optrack/optrack.c
index 1bdd08313f3..65c3c1dad64 100644
--- a/src/third_party/wiredtiger/src/optrack/optrack.c
+++ b/src/third_party/wiredtiger/src/optrack/optrack.c
@@ -20,15 +20,12 @@ __wt_optrack_record_funcid(WT_SESSION_IMPL *session, const char *func, uint16_t
WT_DECL_ITEM(tmp);
WT_DECL_RET;
wt_off_t fsize;
- bool locked;
conn = S2C(session);
- locked = false;
WT_ERR(__wt_scr_alloc(session, strlen(func) + 32, &tmp));
__wt_spin_lock(session, &conn->optrack_map_spinlock);
- locked = true;
if (*func_idp == 0) {
*func_idp = ++optrack_uid;
@@ -42,8 +39,7 @@ err:
WT_IGNORE_RET(__wt_panic(session, ret, "operation tracking initialization failure"));
}
- if (locked)
- __wt_spin_unlock(session, &conn->optrack_map_spinlock);
+ __wt_spin_unlock_if_owned(session, &conn->optrack_map_spinlock);
__wt_scr_free(session, &tmp);
}
diff --git a/src/third_party/wiredtiger/src/os_common/os_fhandle.c b/src/third_party/wiredtiger/src/os_common/os_fhandle.c
index 18bdb756176..c13a1a2010d 100644
--- a/src/third_party/wiredtiger/src/os_common/os_fhandle.c
+++ b/src/third_party/wiredtiger/src/os_common/os_fhandle.c
@@ -388,6 +388,7 @@ __fsync_background(WT_SESSION_IMPL *session, WT_FH *fh)
uint64_t now;
conn = S2C(session);
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->fh_lock);
WT_STAT_CONN_INCR(session, fsync_all_fh_total);
handle = fh->handle;
diff --git a/src/third_party/wiredtiger/src/os_common/os_fs_inmemory.c b/src/third_party/wiredtiger/src/os_common/os_fs_inmemory.c
index 9450cbba1d5..8e71b399f56 100644
--- a/src/third_party/wiredtiger/src/os_common/os_fs_inmemory.c
+++ b/src/third_party/wiredtiger/src/os_common/os_fs_inmemory.c
@@ -34,6 +34,7 @@ __im_handle_search(WT_SESSION_IMPL *session, WT_FILE_SYSTEM *file_system, const
uint64_t bucket, hash;
im_fs = (WT_FILE_SYSTEM_INMEM *)file_system;
+ WT_ASSERT_SPINLOCK_OWNED(session, &im_fs->lock);
hash = __wt_hash_city64(name, strlen(name));
bucket = hash & (S2C(session)->hash_size - 1);
@@ -58,6 +59,13 @@ __im_handle_remove(
im_fs = (WT_FILE_SYSTEM_INMEM *)file_system;
+ /*
+ * We should hold the file system lock unless we are destroying the file system, in which case
+ * we would call this with the force argument set to true.
+ */
+ if (!force)
+ WT_ASSERT_SPINLOCK_OWNED(session, &im_fs->lock);
+
if (im_fh->ref != 0) {
__wt_err(session, EBUSY, "%s: file-remove", im_fh->iface.name);
if (!force)
diff --git a/src/third_party/wiredtiger/src/reconcile/rec_write.c b/src/third_party/wiredtiger/src/reconcile/rec_write.c
index c8eaebbd2b0..5bad5df9d9b 100644
--- a/src/third_party/wiredtiger/src/reconcile/rec_write.c
+++ b/src/third_party/wiredtiger/src/reconcile/rec_write.c
@@ -183,6 +183,9 @@ __reconcile_post_wrapup(
btree = S2BT(session);
+ /* Ensure that we own the lock before unlocking the page, as we unlock it unconditionally. */
+ WT_ASSERT_SPINLOCK_OWNED(session, &page->modify->page_lock);
+
page->modify->flags = 0;
/* Release the reconciliation lock. */
@@ -251,6 +254,9 @@ __reconcile(WT_SESSION_IMPL *session, WT_REF *ref, WT_SALVAGE_COOKIE *salvage, u
btree = S2BT(session);
page = ref->page;
+ if (*page_lockedp)
+ WT_ASSERT_SPINLOCK_OWNED(session, &page->modify->page_lock);
+
/* Save the eviction state. */
__reconcile_save_evict_state(session, ref, flags);
diff --git a/src/third_party/wiredtiger/src/rollback_to_stable/rts_api.c b/src/third_party/wiredtiger/src/rollback_to_stable/rts_api.c
index bc3e55f7580..60d9556ab40 100644
--- a/src/third_party/wiredtiger/src/rollback_to_stable/rts_api.c
+++ b/src/third_party/wiredtiger/src/rollback_to_stable/rts_api.c
@@ -44,6 +44,9 @@ __rollback_to_stable_int(WT_SESSION_IMPL *session, bool no_ckpt)
txn_global = &conn->txn_global;
dryrun = conn->rts->dryrun;
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->checkpoint_lock);
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->schema_lock);
+
/*
* Rollback to stable should ignore tombstones in the history store since it needs to scan the
* entire table sequentially.
diff --git a/src/third_party/wiredtiger/src/schema/schema_alter.c b/src/third_party/wiredtiger/src/schema/schema_alter.c
index 6ebf476db4c..c749b5d456a 100644
--- a/src/third_party/wiredtiger/src/schema/schema_alter.c
+++ b/src/third_party/wiredtiger/src/schema/schema_alter.c
@@ -394,6 +394,9 @@ __schema_alter(WT_SESSION_IMPL *session, const char *uri, const char *newcfg[])
const char *cfg[] = {WT_CONFIG_BASE(session, WT_SESSION_alter), newcfg[0], NULL};
bool exclusive_refreshed;
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->checkpoint_lock);
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
/*
* Determine what configuration says about exclusive access. A non exclusive alter that doesn't
* refresh in-memory configuration is only valid for the table objects.
@@ -440,6 +443,9 @@ __wt_schema_alter(WT_SESSION_IMPL *session, const char *uri, const char *newcfg[
WT_DECL_RET;
WT_SESSION_IMPL *int_session;
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->checkpoint_lock);
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
WT_RET(__wt_schema_internal_session(session, &int_session));
WT_ERR(__wt_meta_track_on(int_session));
ret = __schema_alter(int_session, uri, newcfg);
diff --git a/src/third_party/wiredtiger/src/schema/schema_create.c b/src/third_party/wiredtiger/src/schema/schema_create.c
index 9501b3d82c7..5d805ac4f6c 100644
--- a/src/third_party/wiredtiger/src/schema/schema_create.c
+++ b/src/third_party/wiredtiger/src/schema/schema_create.c
@@ -1476,6 +1476,15 @@ __wt_schema_create(WT_SESSION_IMPL *session, const char *uri, const char *config
WT_DECL_RET;
WT_SESSION_IMPL *int_session;
+ /*
+ * We should be calling this function with the schema lock, but we cannot verify it here because
+ * we can re-enter this function with the internal session. If we get here using the internal
+ * session, we cannot check whether we own the lock, as it would be locked by the outer session.
+ * We can thus only check whether the lock is acquired, as opposed to whether the lock is
+ * acquired by us.
+ */
+ WT_ASSERT(session, __wt_spin_locked(session, &S2C(session)->schema_lock));
+
WT_RET(__wt_schema_internal_session(session, &int_session));
ret = __schema_create(int_session, uri, config);
WT_TRET(__wt_schema_session_release(session, int_session));
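
The __wt_schema_create hunk above (and the matching hunks in schema_drop.c and schema_rename.c below) asserts only __wt_spin_locked rather than ownership, because the function can be re-entered on an internal session while the outer session holds the schema lock. In terms of the earlier sketch (again an illustration, not the real WiredTiger behavior), the weaker check is the only one that holds on both sessions:

/* Hedged illustration of the re-entrant case, reusing the sketch helpers above. */
static void
sketch_reentrant_example(SKETCH_SESSION *outer, SKETCH_SESSION *internal, SKETCH_SPINLOCK *schema_lock)
{
    /* The outer session acquires the schema lock before the schema operation starts. */
    sketch_spin_lock(outer, schema_lock);

    /* Re-entered on the internal session: ownership cannot be asserted ... */
    assert(!sketch_spin_owned(internal, schema_lock));

    /* ... so the code checks only that the lock is held, mirroring
     * WT_ASSERT(session, __wt_spin_locked(session, &S2C(session)->schema_lock)). */
    assert(sketch_spin_locked(internal, schema_lock));

    sketch_spin_unlock(outer, schema_lock);
}
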
diff --git a/src/third_party/wiredtiger/src/schema/schema_drop.c b/src/third_party/wiredtiger/src/schema/schema_drop.c
index 354e779b908..883065d1f25 100644
--- a/src/third_party/wiredtiger/src/schema/schema_drop.c
+++ b/src/third_party/wiredtiger/src/schema/schema_drop.c
@@ -183,10 +183,10 @@ __drop_tiered(WT_SESSION_IMPL *session, const char *uri, bool force, const char
WT_TIERED *tiered;
u_int i, localid;
const char *filename, *name;
- bool exist, got_dhandle, locked, remove_files, remove_shared;
+ bool exist, got_dhandle, remove_files, remove_shared;
conn = S2C(session);
- got_dhandle = locked = false;
+ got_dhandle = false;
WT_RET(__wt_config_gets(session, cfg, "remove_files", &cval));
remove_files = cval.val != 0;
WT_RET(__wt_config_gets(session, cfg, "remove_shared", &cval));
@@ -279,7 +279,6 @@ __drop_tiered(WT_SESSION_IMPL *session, const char *uri, bool force, const char
* race and process work for this handle.
*/
__wt_spin_lock(session, &conn->tiered_lock);
- locked = true;
/*
* Close all btree handles associated with this table. This must be done after we're done using
* the tiered structure because that is from the dhandle.
@@ -291,9 +290,8 @@ __drop_tiered(WT_SESSION_IMPL *session, const char *uri, bool force, const char
WT_ERR(ret);
/* If everything is successful, remove any tiered work associated with this tiered handle. */
- __wt_tiered_remove_work(session, tiered, locked);
+ __wt_tiered_remove_work(session, tiered, true);
__wt_spin_unlock(session, &conn->tiered_lock);
- locked = false;
__wt_verbose(session, WT_VERB_TIERED, "DROP_TIERED: remove tiered table %s from metadata", uri);
ret = __wt_metadata_remove(session, uri);
@@ -302,8 +300,7 @@ err:
if (got_dhandle)
WT_TRET(__wt_session_release_dhandle(session));
__wt_free(session, name);
- if (locked)
- __wt_spin_unlock(session, &conn->tiered_lock);
+ __wt_spin_unlock_if_owned(session, &conn->tiered_lock);
return (ret);
}
@@ -370,6 +367,15 @@ __wt_schema_drop(WT_SESSION_IMPL *session, const char *uri, const char *cfg[])
WT_DECL_RET;
WT_SESSION_IMPL *int_session;
+ /*
+ * We should be calling this function with the schema lock, but we cannot verify it here because
+ * we can re-enter this function with the internal session. If we get here using the internal
+ * session, we cannot check whether we own the lock, as it would be locked by the outer session.
+ * We can thus only check whether the lock is acquired, as opposed to whether the lock is
+ * acquired by us.
+ */
+ WT_ASSERT(session, __wt_spin_locked(session, &S2C(session)->schema_lock));
+
WT_RET(__wt_schema_internal_session(session, &int_session));
ret = __schema_drop(int_session, uri, cfg);
WT_TRET(__wt_schema_session_release(session, int_session));
diff --git a/src/third_party/wiredtiger/src/schema/schema_rename.c b/src/third_party/wiredtiger/src/schema/schema_rename.c
index 372c64e6f27..0bfdc400765 100644
--- a/src/third_party/wiredtiger/src/schema/schema_rename.c
+++ b/src/third_party/wiredtiger/src/schema/schema_rename.c
@@ -321,6 +321,16 @@ __wt_schema_rename(WT_SESSION_IMPL *session, const char *uri, const char *newuri
WT_DECL_RET;
WT_SESSION_IMPL *int_session;
+ /*
+ * We should be calling this function with the checkpoint lock and the schema lock, but we
+ * cannot verify that here because we can re-enter this function with the internal session. If
+ * we get here using the internal session, we cannot check whether we own the locks, as they
+ * would be locked by the outer session. We can thus only check whether the locks are acquired,
+ * as opposed to whether the locks are acquired by us.
+ */
+ WT_ASSERT(session, __wt_spin_locked(session, &S2C(session)->checkpoint_lock));
+ WT_ASSERT(session, __wt_spin_locked(session, &S2C(session)->schema_lock));
+
WT_RET(__wt_schema_internal_session(session, &int_session));
ret = __schema_rename(int_session, uri, newuri, cfg);
WT_TRET(__wt_schema_session_release(session, int_session));
diff --git a/src/third_party/wiredtiger/src/schema/schema_truncate.c b/src/third_party/wiredtiger/src/schema/schema_truncate.c
index 3393a9834e0..7e7d5f50bab 100644
--- a/src/third_party/wiredtiger/src/schema/schema_truncate.c
+++ b/src/third_party/wiredtiger/src/schema/schema_truncate.c
@@ -100,6 +100,9 @@ __wt_schema_truncate(WT_SESSION_IMPL *session, const char *uri, const char *cfg[
WT_DECL_RET;
const char *tablename;
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->checkpoint_lock);
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
tablename = uri;
if (WT_PREFIX_MATCH(uri, "file:"))
diff --git a/src/third_party/wiredtiger/src/session/session_api.c b/src/third_party/wiredtiger/src/session/session_api.c
index da182348426..dcff7b1d9d4 100644
--- a/src/third_party/wiredtiger/src/session/session_api.c
+++ b/src/third_party/wiredtiger/src/session/session_api.c
@@ -1410,6 +1410,9 @@ err:
static int
__session_salvage_worker(WT_SESSION_IMPL *session, const char *uri, const char *cfg[])
{
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->checkpoint_lock);
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
WT_RET(__wt_schema_worker(
session, uri, __wt_salvage, NULL, cfg, WT_DHANDLE_EXCLUSIVE | WT_BTREE_SALVAGE));
WT_RET(
diff --git a/src/third_party/wiredtiger/src/session/session_compact.c b/src/third_party/wiredtiger/src/session/session_compact.c
index d7e9065dc99..1da742f95ac 100644
--- a/src/third_party/wiredtiger/src/session/session_compact.c
+++ b/src/third_party/wiredtiger/src/session/session_compact.c
@@ -159,6 +159,8 @@ __compact_handle_append(WT_SESSION_IMPL *session, const char *cfg[])
WT_UNUSED(cfg);
+ WT_ASSERT_SPINLOCK_OWNED(session, &S2C(session)->schema_lock);
+
WT_RET(__wt_session_get_dhandle(session, session->dhandle->name, NULL, NULL, 0));
/* Set compact active on the handle. */
diff --git a/src/third_party/wiredtiger/src/txn/txn_ckpt.c b/src/third_party/wiredtiger/src/txn/txn_ckpt.c
index 1d9c1741e42..067c21f5900 100644
--- a/src/third_party/wiredtiger/src/txn/txn_ckpt.c
+++ b/src/third_party/wiredtiger/src/txn/txn_ckpt.c
@@ -82,6 +82,9 @@ __checkpoint_flush_tier(WT_SESSION_IMPL *session, bool force)
WT_STAT_CONN_INCR(session, flush_tier);
conn = S2C(session);
cursor = NULL;
+
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->schema_lock);
+
/*
* For supporting splits and merge:
* - See if there is any merging work to do to prepare and create an object that is
@@ -671,6 +674,8 @@ __checkpoint_prepare(WT_SESSION_IMPL *session, bool *trackingp, const char *cfg[
txn_global = &conn->txn_global;
txn_shared = WT_SESSION_TXN_SHARED(session);
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->schema_lock);
+
WT_RET(__wt_config_gets(session, cfg, "use_timestamp", &cval));
use_timestamp = (cval.val != 0);
WT_RET(__wt_config_gets(session, cfg, "flush_tier.enabled", &cval));
@@ -1006,6 +1011,8 @@ __txn_checkpoint(WT_SESSION_IMPL *session, const char *cfg[])
saved_isolation = session->isolation;
full = idle = tracking = use_timestamp = false;
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->checkpoint_lock);
+
/* Avoid doing work if possible. */
WT_RET(__txn_checkpoint_can_skip(session, cfg, &full, &use_timestamp, &can_skip));
if (can_skip) {
@@ -1401,6 +1408,8 @@ __txn_checkpoint_wrapper(WT_SESSION_IMPL *session, const char *cfg[])
conn = S2C(session);
txn_global = &conn->txn_global;
+ WT_ASSERT_SPINLOCK_OWNED(session, &conn->checkpoint_lock);
+
WT_STAT_CONN_SET(session, txn_checkpoint_running, 1);
txn_global->checkpoint_running = true;
@@ -2451,6 +2460,8 @@ __wt_checkpoint_close(WT_SESSION_IMPL *session, bool final)
WT_DECL_RET;
bool bulk, metadata, need_tracking;
+ WT_ASSERT_SPINLOCK_OWNED(session, &session->dhandle->close_lock);
+
btree = S2BT(session);
bulk = F_ISSET(btree, WT_BTREE_BULK);
metadata = WT_IS_METADATA(session->dhandle);