summary | refs | log | tree | commit | diff
path: root/src/third_party/wiredtiger/src/include/cache.i
diff options
context:
space:
mode:
Diffstat (limited to 'src/third_party/wiredtiger/src/include/cache.i')
-rw-r--r-- | src/third_party/wiredtiger/src/include/cache.i | 186
1 file changed, 136 insertions, 50 deletions
diff --git a/src/third_party/wiredtiger/src/include/cache.i b/src/third_party/wiredtiger/src/include/cache.i
index 72c8307756d..b5605769f1a 100644
--- a/src/third_party/wiredtiger/src/include/cache.i
+++ b/src/third_party/wiredtiger/src/include/cache.i
@@ -7,6 +7,17 @@
*/
/*
+ * __wt_cache_aggressive --
+ * Indicate if the cache is operating in aggressive mode.
+ */
+static inline bool
+__wt_cache_aggressive(WT_SESSION_IMPL *session)
+{
+ return (S2C(session)->cache->evict_aggressive_score >=
+ WT_EVICT_SCORE_CUTOFF);
+}
+
+/*
* __wt_cache_read_gen --
* Get the current read generation number.
*/
@@ -68,6 +79,33 @@ __wt_cache_read_gen_new(WT_SESSION_IMPL *session, WT_PAGE *page)
}
/*
+ * __wt_cache_stuck --
+ * Indicate if the cache is stuck (i.e., not making progress).
+ */
+static inline bool
+__wt_cache_stuck(WT_SESSION_IMPL *session)
+{
+ WT_CACHE *cache;
+
+ cache = S2C(session)->cache;
+ return (cache->evict_aggressive_score == WT_EVICT_SCORE_MAX &&
+ F_ISSET(cache,
+ WT_CACHE_EVICT_CLEAN_HARD | WT_CACHE_EVICT_DIRTY_HARD));
+}
+
+/*
+ * __wt_page_evict_soon --
+ * Set a page to be evicted as soon as possible.
+ */
+static inline void
+__wt_page_evict_soon(WT_SESSION_IMPL *session, WT_REF *ref)
+{
+ WT_UNUSED(session);
+
+ ref->page->read_gen = WT_READGEN_OLDEST;
+}
+
+/*
* __wt_cache_pages_inuse --
* Return the number of pages in use.
*/
@@ -78,21 +116,26 @@ __wt_cache_pages_inuse(WT_CACHE *cache)
}
/*
+ * __wt_cache_bytes_plus_overhead --
+ * Apply the cache overhead to a size in bytes.
+ */
+static inline uint64_t
+__wt_cache_bytes_plus_overhead(WT_CACHE *cache, uint64_t sz)
+{
+ if (cache->overhead_pct != 0)
+ sz += (sz * (uint64_t)cache->overhead_pct) / 100;
+
+ return (sz);
+}
+
+/*
* __wt_cache_bytes_inuse --
* Return the number of bytes in use.
*/
static inline uint64_t
__wt_cache_bytes_inuse(WT_CACHE *cache)
{
- uint64_t bytes_inuse;
-
- /* Adjust the cache size to take allocation overhead into account. */
- bytes_inuse = cache->bytes_inmem;
- if (cache->overhead_pct != 0)
- bytes_inuse +=
- (bytes_inuse * (uint64_t)cache->overhead_pct) / 100;
-
- return (bytes_inuse);
+ return (__wt_cache_bytes_plus_overhead(cache, cache->bytes_inmem));
}
/*
@@ -102,14 +145,48 @@ __wt_cache_bytes_inuse(WT_CACHE *cache)
static inline uint64_t
__wt_cache_dirty_inuse(WT_CACHE *cache)
{
- uint64_t dirty_inuse;
+ return (__wt_cache_bytes_plus_overhead(cache,
+ cache->bytes_dirty_intl + cache->bytes_dirty_leaf));
+}
- dirty_inuse = cache->bytes_dirty;
- if (cache->overhead_pct != 0)
- dirty_inuse +=
- (dirty_inuse * (uint64_t)cache->overhead_pct) / 100;
+/*
+ * __wt_cache_dirty_leaf_inuse --
+ * Return the number of dirty bytes in use by leaf pages.
+ */
+static inline uint64_t
+__wt_cache_dirty_leaf_inuse(WT_CACHE *cache)
+{
+ return (__wt_cache_bytes_plus_overhead(cache, cache->bytes_dirty_leaf));
+}
- return (dirty_inuse);
+/*
+ * __wt_cache_bytes_image --
+ * Return the number of page image bytes in use.
+ */
+static inline uint64_t
+__wt_cache_bytes_image(WT_CACHE *cache)
+{
+ return (__wt_cache_bytes_plus_overhead(cache, cache->bytes_image));
+}
+
+/*
+ * __wt_cache_bytes_other --
+ * Return the number of bytes in use not for page images.
+ */
+static inline uint64_t
+__wt_cache_bytes_other(WT_CACHE *cache)
+{
+ uint64_t bytes_image, bytes_inmem;
+
+ bytes_image = cache->bytes_image;
+ bytes_inmem = cache->bytes_inmem;
+
+ /*
+ * The reads above could race with changes to the values, so protect
+ * against underflow.
+ */
+ return ((bytes_image > bytes_inmem) ? 0 :
+ __wt_cache_bytes_plus_overhead(cache, bytes_inmem - bytes_image));
}
/*
@@ -139,28 +216,18 @@ __wt_session_can_wait(WT_SESSION_IMPL *session)
}
/*
- * __wt_eviction_dirty_target --
- * Return if the eviction server is running to reduce the number of dirty
- * pages (versus running to discard pages from the cache).
- */
-static inline bool
-__wt_eviction_dirty_target(WT_SESSION_IMPL *session)
-{
- return (FLD_ISSET(S2C(session)->cache->state, WT_EVICT_PASS_DIRTY));
-}
-
-/*
* __wt_eviction_needed --
* Return if an application thread should do eviction, and the cache full
- * percentage as a side-effect.
+ * percentage as a side-effect.
*/
static inline bool
-__wt_eviction_needed(WT_SESSION_IMPL *session, u_int *pct_fullp)
+__wt_eviction_needed(WT_SESSION_IMPL *session, bool busy, u_int *pct_fullp)
{
WT_CONNECTION_IMPL *conn;
WT_CACHE *cache;
- uint64_t bytes_inuse, bytes_max;
- u_int pct_full;
+ double dirty_trigger;
+ uint64_t bytes_inuse, bytes_max, dirty_inuse;
+ u_int pct_dirty, pct_full;
conn = S2C(session);
cache = conn->cache;
@@ -176,33 +243,36 @@ __wt_eviction_needed(WT_SESSION_IMPL *session, u_int *pct_fullp)
* Avoid division by zero if the cache size has not yet been set in a
* shared cache.
*/
- bytes_inuse = __wt_cache_bytes_inuse(cache);
bytes_max = conn->cache_size + 1;
+ bytes_inuse = __wt_cache_bytes_inuse(cache);
+ dirty_inuse = __wt_cache_dirty_leaf_inuse(cache);
/*
* Calculate the cache full percentage; anything over the trigger means
* we involve the application thread.
*/
- pct_full = (u_int)((100 * bytes_inuse) / bytes_max);
- if (pct_fullp != NULL)
- *pct_fullp = pct_full;
- /*
- * If the connection is closing we do not need eviction from an
- * application thread. The eviction subsystem is already closed.
- * We return here because some callers depend on the percent full
- * having been filled in.
- */
- if (F_ISSET(conn, WT_CONN_CLOSING))
- return (false);
+ if (pct_fullp != NULL) {
+ pct_full = (u_int)((100 * bytes_inuse) / bytes_max);
+ pct_dirty = (u_int)((100 * dirty_inuse) / bytes_max);
- if (pct_full > cache->eviction_trigger)
- return (true);
+ *pct_fullp = (u_int)WT_MAX(0, 100 - WT_MIN(
+ (int)cache->eviction_trigger - (int)pct_full,
+ (int)cache->eviction_dirty_trigger - (int)pct_dirty));
+ }
- /* Return if there are too many dirty bytes in cache. */
- if (__wt_cache_dirty_inuse(cache) >
- (cache->eviction_dirty_trigger * bytes_max) / 100)
- return (true);
- return (false);
+ /*
+ * Only check the dirty trigger when the session is not busy.
+ *
+ * In other words, once we are pinning resources, try to finish the
+ * operation as quickly as possible without exceeding the cache size.
+ * The next transaction in this session will not be able to start until
+ * the cache is under the limit.
+ */
+ if ((dirty_trigger = cache->eviction_scrub_limit) < 1.0)
+ dirty_trigger = (double)cache->eviction_dirty_trigger;
+ return (bytes_inuse > (cache->eviction_trigger * bytes_max) / 100 ||
+ (!busy &&
+ dirty_inuse > (uint64_t)(dirty_trigger * bytes_max) / 100));
}
/*
@@ -229,12 +299,28 @@ static inline int
__wt_cache_eviction_check(WT_SESSION_IMPL *session, bool busy, bool *didworkp)
{
WT_BTREE *btree;
+ WT_TXN_GLOBAL *txn_global;
+ WT_TXN_STATE *txn_state;
u_int pct_full;
if (didworkp != NULL)
*didworkp = false;
/*
+ * If the current transaction is keeping the oldest ID pinned, it is in
+ * the middle of an operation. This may prevent the oldest ID from
+ * moving forward, leading to deadlock, so only evict what we can.
+ * Otherwise, we are at a transaction boundary and we can work harder
+ * to make sure there is free space in the cache.
+ */
+ txn_global = &S2C(session)->txn_global;
+ txn_state = WT_SESSION_TXN_STATE(session);
+ busy = busy || txn_state->id != WT_TXN_NONE ||
+ session->nhazard > 0 ||
+ (txn_state->snap_min != WT_TXN_NONE &&
+ txn_global->current != txn_global->oldest_id);
+
+ /*
* LSM sets the no-cache-check flag when holding the LSM tree lock, in
* that case, or when holding the schema or handle list locks (which
* block eviction), we don't want to highjack the thread for eviction.
@@ -256,7 +342,7 @@ __wt_cache_eviction_check(WT_SESSION_IMPL *session, bool busy, bool *didworkp)
return (0);
/* Check if eviction is needed. */
- if (!__wt_eviction_needed(session, &pct_full))
+ if (!__wt_eviction_needed(session, busy, &pct_full))
return (0);
/*