path: root/storage/xtradb/buf/buf0flu.cc
author	Jan Lindström <jplindst@mariadb.org>	2014-03-27 09:35:24 +0200
committer	Jan Lindström <jplindst@mariadb.org>	2014-03-27 09:35:24 +0200
commit	502733803979e2109b6dcdcb3d8c5a0ddd6d2363 (patch)
tree	2b5f3ef37c47d05fe08b169f6ad91dcc92e90f8b /storage/xtradb/buf/buf0flu.cc
parent	f761835b5c13158fd958a5239b346daa09b06cc6 (diff)
download	mariadb-git-502733803979e2109b6dcdcb3d8c5a0ddd6d2363.tar.gz
Fix bug https://code.launchpad.net/~laurynas-biveinis/percona-server/bug1295268
(Inadequate background LRU flushing for write workloads with InnoDB compression.)

If InnoDB compression is used and the workload has writes, the following situation is possible. The LRU flusher issues an LRU flush request for a buffer pool instance. buf_do_LRU_batch decides to perform unzip_LRU eviction, and this eviction might fully satisfy the request. buf_flush_LRU_tail then checks the number of pages flushed in the last iteration, finds it to be zero, and wrongly decides not to flush that instance anymore.

Fixed by maintaining an unzip_LRU eviction counter in the flush_counters_t struct and checking it in buf_flush_LRU_tail when deciding whether to stop flushing the current instance.

Also added test cases for the new configuration files so that the mysql-test-run sys_vars suite passes, and fixed some small errors.
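For reference, a minimal sketch of the counter struct this fix extends, assuming the layout implied by the hunks below; the real definition in the XtraDB headers may differ in details:

	/* Sketch only: field names are taken from this diff; the
	surrounding declaration details are assumptions. */
	struct flush_counters_t {
		ulint	flushed;	/*!< number of pages flushed */
		ulint	evicted;	/*!< number of pages evicted */
		ulint	unzip_LRU_evicted;
					/*!< number of uncompressed page
					frames evicted from the
					unzip_LRU; added by this fix */
	};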
Diffstat (limited to 'storage/xtradb/buf/buf0flu.cc')
-rw-r--r--	storage/xtradb/buf/buf0flu.cc | 24 ++++++++++++++++--------
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc
index 53ac9bb9cc7..f4ba0f10761 100644
--- a/storage/xtradb/buf/buf0flu.cc
+++ b/storage/xtradb/buf/buf0flu.cc
@@ -1549,6 +1549,7 @@ buf_flush_LRU_list_batch(
 	n->flushed = 0;
 	n->evicted = 0;
+	n->unzip_LRU_evicted = 0;
 
 	ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
@@ -1660,21 +1661,22 @@ buf_do_LRU_batch(
 	flush_counters_t*	n)	/*!< out: flushed/evicted page
 					counts */
 {
-	ulint	count = 0;
-
 	if (buf_LRU_evict_from_unzip_LRU(buf_pool)) {
-		count += buf_free_from_unzip_LRU_list_batch(buf_pool, max);
+		n->unzip_LRU_evicted
+			+= buf_free_from_unzip_LRU_list_batch(buf_pool, max);
+	} else {
+		n->unzip_LRU_evicted = 0;
 	}
 
-	if (max > count) {
-		buf_flush_LRU_list_batch(buf_pool, max - count, limited_scan,
-					 n);
+	if (max > n->unzip_LRU_evicted) {
+		buf_flush_LRU_list_batch(buf_pool, max - n->unzip_LRU_evicted,
+					 limited_scan, n);
 	} else {
 		n->evicted = 0;
 		n->flushed = 0;
 	}
 
-	n->flushed += count;
+	n->evicted += n->unzip_LRU_evicted;
 }
 
 /*******************************************************************//**
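Pieced together from the hunk above, the patched buf_do_LRU_batch() reads roughly as follows. The parameters before flush_counters_t* n (buf_pool, max, limited_scan) are inferred from the call sites visible in the diff, not copied from the source:

	/* Sketch of buf_do_LRU_batch() after this patch; anything not
	visible in the hunk above is an assumption. */
	static
	void
	buf_do_LRU_batch(
		buf_pool_t*		buf_pool,	/*!< in: buffer pool instance (inferred) */
		ulint			max,		/*!< in: max pages to flush (inferred) */
		bool			limited_scan,	/*!< in: limit LRU scan depth (inferred) */
		flush_counters_t*	n)		/*!< out: flushed/evicted page counts */
	{
		if (buf_LRU_evict_from_unzip_LRU(buf_pool)) {
			/* Track unzip_LRU evictions in their own counter
			instead of the old local "count" that was later
			(wrongly) added to n->flushed. */
			n->unzip_LRU_evicted
				+= buf_free_from_unzip_LRU_list_batch(buf_pool, max);
		} else {
			n->unzip_LRU_evicted = 0;
		}

		if (max > n->unzip_LRU_evicted) {
			/* Evictions alone did not satisfy the request;
			flush the remainder from the LRU list. */
			buf_flush_LRU_list_batch(buf_pool,
						 max - n->unzip_LRU_evicted,
						 limited_scan, n);
		} else {
			n->evicted = 0;
			n->flushed = 0;
		}

		/* Account unzip_LRU evictions as evictions, not flushes. */
		n->evicted += n->unzip_LRU_evicted;
	}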
@@ -2306,9 +2308,15 @@ buf_flush_LRU_tail(void)
 
 		requested_pages[i] += lru_chunk_size;
 
+		/* If we failed to flush or evict this
+		instance, do not bother anymore. But take into
+		account that we might have zero flushed pages
+		because the flushing request was fully
+		satisfied by unzip_LRU evictions. */
 		if (requested_pages[i] >= scan_depth[i]
 		    || !(srv_cleaner_eviction_factor
-			 ? n.evicted : n.flushed)) {
+			 ? n.evicted
+			 : (n.flushed + n.unzip_LRU_evicted))) {
 
 			active_instance[i] = false;
 			remaining_instances--;
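Read as a whole, the stop test above amounts to the condition sketched below; "progress" is a hypothetical local introduced only for illustration and does not exist in the source:

	/* Illustrative rewrite of the per-instance stop test. */
	const ulint	progress = srv_cleaner_eviction_factor
		? n.evicted				/* eviction-driven cleaner */
		: n.flushed + n.unzip_LRU_evicted;	/* count unzip_LRU
							evictions as progress
							too (the fix) */

	if (requested_pages[i] >= scan_depth[i] || progress == 0) {
		active_instance[i] = false;	/* stop flushing this instance */
		remaining_instances--;
	}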