summary refs log tree commit diff
path: root/src/third_party/wiredtiger/src/include/btree.i
diff options
context:
space:
mode:
Diffstat (limited to 'src/third_party/wiredtiger/src/include/btree.i')
-rw-r--r--  src/third_party/wiredtiger/src/include/btree.i  67
1 files changed, 61 insertions, 6 deletions
diff --git a/src/third_party/wiredtiger/src/include/btree.i b/src/third_party/wiredtiger/src/include/btree.i
index e48189d50ea..ad603f3ea53 100644
--- a/src/third_party/wiredtiger/src/include/btree.i
+++ b/src/third_party/wiredtiger/src/include/btree.i
@@ -107,7 +107,7 @@ __wt_cache_page_inmem_incr(WT_SESSION_IMPL *session, WT_PAGE *page, size_t size)
(void)__wt_atomic_addsize(&page->modify->bytes_dirty, size);
if (WT_PAGE_IS_INTERNAL(page))
(void)__wt_atomic_add64(&cache->bytes_dirty_intl, size);
- else {
+ else if (!F_ISSET(btree, WT_BTREE_LSM_PRIMARY)) {
(void)__wt_atomic_add64(&cache->bytes_dirty_leaf, size);
(void)__wt_atomic_add64(&btree->bytes_dirty_leaf, size);
}
@@ -241,7 +241,7 @@ __wt_cache_page_byte_dirty_decr(
if (WT_PAGE_IS_INTERNAL(page))
__wt_cache_decr_check_uint64(session, &cache->bytes_dirty_intl,
decr, "WT_CACHE.bytes_dirty_intl");
- else {
+ else if (!F_ISSET(btree, WT_BTREE_LSM_PRIMARY)) {
__wt_cache_decr_check_uint64(session, &btree->bytes_dirty_leaf,
decr, "WT_BTREE.bytes_dirty_leaf");
__wt_cache_decr_check_uint64(session, &cache->bytes_dirty_leaf,
@@ -300,8 +300,10 @@ __wt_cache_dirty_incr(WT_SESSION_IMPL *session, WT_PAGE *page)
(void)__wt_atomic_add64(&cache->bytes_dirty_intl, size);
(void)__wt_atomic_add64(&cache->pages_dirty_intl, 1);
} else {
- (void)__wt_atomic_add64(&btree->bytes_dirty_leaf, size);
- (void)__wt_atomic_add64(&cache->bytes_dirty_leaf, size);
+ if (!F_ISSET(btree, WT_BTREE_LSM_PRIMARY)) {
+ (void)__wt_atomic_add64(&btree->bytes_dirty_leaf, size);
+ (void)__wt_atomic_add64(&cache->bytes_dirty_leaf, size);
+ }
(void)__wt_atomic_add64(&cache->pages_dirty_leaf, 1);
}
(void)__wt_atomic_addsize(&page->modify->bytes_dirty, size);
@@ -394,7 +396,7 @@ __wt_cache_page_evict(WT_SESSION_IMPL *session, WT_PAGE *page)
__wt_cache_decr_zero_uint64(session,
&cache->bytes_dirty_intl,
modify->bytes_dirty, "WT_CACHE.bytes_dirty_intl");
- else {
+ else if (!F_ISSET(btree, WT_BTREE_LSM_PRIMARY)) {
__wt_cache_decr_zero_uint64(session,
&cache->bytes_dirty_leaf,
modify->bytes_dirty, "WT_CACHE.bytes_dirty_leaf");
@@ -406,7 +408,11 @@ __wt_cache_page_evict(WT_SESSION_IMPL *session, WT_PAGE *page)
/* Update pages and bytes evicted. */
(void)__wt_atomic_add64(&cache->bytes_evict, page->memory_footprint);
- (void)__wt_atomic_addv64(&cache->pages_evict, 1);
+
+ if (F_ISSET(session, WT_SESSION_IN_SPLIT))
+ (void)__wt_atomic_subv64(&cache->pages_inmem, 1);
+ else
+ (void)__wt_atomic_addv64(&cache->pages_evict, 1);
}
/*
@@ -1543,6 +1549,55 @@ __wt_btree_lsm_over_size(WT_SESSION_IMPL *session, uint64_t maxsize)
}
/*
+ * __wt_btree_lsm_switch_primary --
+ * Switch a btree handle to/from the current primary chunk of an LSM tree.
+ */
+static inline void
+__wt_btree_lsm_switch_primary(WT_SESSION_IMPL *session, bool on)
+{
+ WT_BTREE *btree;
+ WT_CACHE *cache;
+ WT_PAGE *child, *root;
+ WT_PAGE_INDEX *pindex;
+ WT_REF *first;
+ size_t size;
+
+ btree = S2BT(session);
+ cache = S2C(session)->cache;
+ root = btree->root.page;
+
+ if (!F_ISSET(btree, WT_BTREE_LSM_PRIMARY))
+ F_SET(btree, WT_BTREE_LSM_PRIMARY | WT_BTREE_NO_EVICTION);
+ if (!on && F_ISSET(btree, WT_BTREE_LSM_PRIMARY)) {
+ pindex = WT_INTL_INDEX_GET_SAFE(root);
+ if (!F_ISSET(btree, WT_BTREE_NO_EVICTION) ||
+ pindex->entries != 1)
+ return;
+ first = pindex->index[0];
+
+ /*
+ * We're reaching down into the page without a hazard pointer,
+ * but that's OK because we know that no-eviction is set so the
+ * page can't disappear.
+ *
+ * While this tree was the primary, its dirty bytes were not
+ * included in the cache accounting. Fix that now before we
+ * open it up for eviction.
+ */
+ child = first->page;
+ if (first->state == WT_REF_MEM &&
+ child->type == WT_PAGE_ROW_LEAF &&
+ __wt_page_is_modified(child)) {
+ size = child->modify->bytes_dirty;
+ (void)__wt_atomic_add64(&btree->bytes_dirty_leaf, size);
+ (void)__wt_atomic_add64(&cache->bytes_dirty_leaf, size);
+ }
+
+ F_CLR(btree, WT_BTREE_LSM_PRIMARY | WT_BTREE_NO_EVICTION);
+ }
+}
+
+/*
* __wt_split_descent_race --
* Return if we raced with an internal page split when descending the tree.
*/