path: root/innobase/buf/buf0flu.c
author     tsmith@quadxeon.mysql.com  2006-11-09 05:02:37 +0100
committer  tsmith@quadxeon.mysql.com  2006-11-09 05:02:37 +0100
commit     f1e0cf9d285269571e96ac4fc058e7494a69c7c7 (patch)
tree       03d4d71d67b4aaee305c7431903f593ad5229733 /innobase/buf/buf0flu.c
parent     41117b1226bcb02536bf963d9a18a6140d5ffe1a (diff)
download   mariadb-git-f1e0cf9d285269571e96ac4fc058e7494a69c7c7.tar.gz
This ChangeSet must be null-merged to 5.1. Applied innodb-5.0-ss982, -ss998, -ss1003
Fixes:
- Bug #15815: Very poor performance with multiple queries running concurrently
- Bug #22868: 'Thread thrashing' with > 50 concurrent conns under an upd-intensive workload
- Bug #23769: Debug assertion failure with innodb_locks_unsafe_for_binlog
- Bug #24089: Race condition in fil_flush_file_spaces()
Diffstat (limited to 'innobase/buf/buf0flu.c')
-rw-r--r--  innobase/buf/buf0flu.c  92
1 file changed, 65 insertions, 27 deletions
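The hunks below guard the per-block fields (io_fix, state, buf_fix_count) with a new block->mutex in addition to the global buf_pool mutex, presumably to relieve the contention behind Bug #15815 and Bug #22868. The following is a minimal, self-contained sketch of that locking order only; it uses plain pthreads and invented names (pool_t, block_t, try_flush), not InnoDB's types, so it illustrates the pattern followed in buf_flush_try_page() rather than the actual implementation: take the pool mutex, then the block mutex, check flush readiness under both, and drop both before starting the slow write.

/*
 * Illustrative sketch only -- NOT InnoDB source.
 * Build: cc sketch.c -o sketch -lpthread
 */
#include <pthread.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t mutex;   /* per-block mutex (the addition in this patch) */
    int             io_fix;  /* nonzero while a write is in progress */
    int             dirty;   /* nonzero if the page must be flushed */
} block_t;

typedef struct {
    pthread_mutex_t mutex;   /* global buffer-pool mutex */
    block_t         block;   /* a single block, for brevity */
} pool_t;

/* Readiness check: caller must hold both pool->mutex and block->mutex,
   mirroring the new ut_ad(mutex_own(&block->mutex)) assertions. */
static int ready_for_flush(const block_t *b)
{
    return b->dirty && b->io_fix == 0;
}

static int try_flush(pool_t *pool)
{
    block_t *b = &pool->block;

    pthread_mutex_lock(&pool->mutex);
    pthread_mutex_lock(&b->mutex);          /* block-level mutex, taken second */

    if (!ready_for_flush(b)) {
        pthread_mutex_unlock(&b->mutex);
        pthread_mutex_unlock(&pool->mutex);
        return 0;
    }

    b->io_fix = 1;                          /* mark the write in progress */

    /* Release both mutexes before the expensive write, so other threads
       are not blocked on the pool mutex for the duration of the I/O. */
    pthread_mutex_unlock(&b->mutex);
    pthread_mutex_unlock(&pool->mutex);

    printf("writing page to disk...\n");    /* stands in for the real I/O */

    pthread_mutex_lock(&pool->mutex);
    pthread_mutex_lock(&b->mutex);
    b->io_fix = 0;
    b->dirty = 0;
    pthread_mutex_unlock(&b->mutex);
    pthread_mutex_unlock(&pool->mutex);
    return 1;
}

int main(void)
{
    static pool_t pool = { PTHREAD_MUTEX_INITIALIZER,
                           { PTHREAD_MUTEX_INITIALIZER, 0, 1 } };
    printf("flushed: %d\n", try_flush(&pool));
    return 0;
}

The full diff follows.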
diff --git a/innobase/buf/buf0flu.c b/innobase/buf/buf0flu.c
index e39d1ae0a71..fc7b60bf5fb 100644
--- a/innobase/buf/buf0flu.c
+++ b/innobase/buf/buf0flu.c
@@ -114,6 +114,7 @@ buf_flush_ready_for_replace(
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
+ ut_ad(mutex_own(&block->mutex));
#endif /* UNIV_SYNC_DEBUG */
if (block->state != BUF_BLOCK_FILE_PAGE) {
ut_print_timestamp(stderr);
@@ -148,6 +149,7 @@ buf_flush_ready_for_flush(
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
+ ut_ad(mutex_own(&(block->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
@@ -539,8 +541,15 @@ buf_flush_try_page(
ut_a(!block || block->state == BUF_BLOCK_FILE_PAGE);
+ if (!block) {
+ mutex_exit(&(buf_pool->mutex));
+ return(0);
+ }
+
+ mutex_enter(&block->mutex);
+
if (flush_type == BUF_FLUSH_LIST
- && block && buf_flush_ready_for_flush(block, flush_type)) {
+ && buf_flush_ready_for_flush(block, flush_type)) {
block->io_fix = BUF_IO_WRITE;
@@ -578,6 +587,7 @@ buf_flush_try_page(
locked = TRUE;
}
+ mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
if (!locked) {
@@ -598,8 +608,8 @@ buf_flush_try_page(
return(1);
- } else if (flush_type == BUF_FLUSH_LRU && block
- && buf_flush_ready_for_flush(block, flush_type)) {
+ } else if (flush_type == BUF_FLUSH_LRU
+ && buf_flush_ready_for_flush(block, flush_type)) {
/* VERY IMPORTANT:
Because any thread may call the LRU flush, even when owning
@@ -639,14 +649,15 @@ buf_flush_try_page(
buf_pool mutex: this ensures that the latch is acquired
immediately. */
+ mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
buf_flush_write_block_low(block);
return(1);
- } else if (flush_type == BUF_FLUSH_SINGLE_PAGE && block
- && buf_flush_ready_for_flush(block, flush_type)) {
+ } else if (flush_type == BUF_FLUSH_SINGLE_PAGE
+ && buf_flush_ready_for_flush(block, flush_type)) {
block->io_fix = BUF_IO_WRITE;
@@ -672,6 +683,7 @@ buf_flush_try_page(
(buf_pool->n_flush[flush_type])++;
+ mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
rw_lock_s_lock_gen(&(block->lock), BUF_IO_WRITE);
@@ -688,11 +700,12 @@ buf_flush_try_page(
buf_flush_write_block_low(block);
return(1);
- } else {
- mutex_exit(&(buf_pool->mutex));
+ }
- return(0);
- }
+ mutex_exit(&block->mutex);
+ mutex_exit(&(buf_pool->mutex));
+
+ return(0);
}
/***************************************************************
@@ -737,34 +750,48 @@ buf_flush_try_neighbors(
block = buf_page_hash_get(space, i);
ut_a(!block || block->state == BUF_BLOCK_FILE_PAGE);
- if (block && flush_type == BUF_FLUSH_LRU && i != offset
- && !block->old) {
+ if (!block) {
+
+ continue;
+
+ } else if (flush_type == BUF_FLUSH_LRU && i != offset
+ && !block->old) {
/* We avoid flushing 'non-old' blocks in an LRU flush,
because the flushed blocks are soon freed */
continue;
- }
+ } else {
+
+ mutex_enter(&block->mutex);
+
+ if (buf_flush_ready_for_flush(block, flush_type)
+ && (i == offset || block->buf_fix_count == 0)) {
+ /* We only try to flush those
+ neighbors != offset where the buf fix count is
+ zero, as we then know that we probably can
+ latch the page without a semaphore wait.
+ Semaphore waits are expensive because we must
+ flush the doublewrite buffer before we start
+ waiting. */
- if (block && buf_flush_ready_for_flush(block, flush_type)
- && (i == offset || block->buf_fix_count == 0)) {
- /* We only try to flush those neighbors != offset
- where the buf fix count is zero, as we then know that
- we probably can latch the page without a semaphore
- wait. Semaphore waits are expensive because we must
- flush the doublewrite buffer before we start
- waiting. */
+ mutex_exit(&block->mutex);
- mutex_exit(&(buf_pool->mutex));
+ mutex_exit(&(buf_pool->mutex));
- /* Note: as we release the buf_pool mutex above, in
- buf_flush_try_page we cannot be sure the page is still
- in a flushable state: therefore we check it again
- inside that function. */
+ /* Note: as we release the buf_pool mutex
+ above, in buf_flush_try_page we cannot be sure
+ the page is still in a flushable state:
+ therefore we check it again inside that
+ function. */
- count += buf_flush_try_page(space, i, flush_type);
+ count += buf_flush_try_page(space, i,
+ flush_type);
- mutex_enter(&(buf_pool->mutex));
+ mutex_enter(&(buf_pool->mutex));
+ } else {
+ mutex_exit(&block->mutex);
+ }
}
}
@@ -858,12 +885,15 @@ buf_flush_batch(
while ((block != NULL) && !found) {
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
+ mutex_enter(&block->mutex);
+
if (buf_flush_ready_for_flush(block, flush_type)) {
found = TRUE;
space = block->space;
offset = block->offset;
+ mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
old_page_count = page_count;
@@ -881,10 +911,14 @@ buf_flush_batch(
} else if (flush_type == BUF_FLUSH_LRU) {
+ mutex_exit(&block->mutex);
+
block = UT_LIST_GET_PREV(LRU, block);
} else {
ut_ad(flush_type == BUF_FLUSH_LIST);
+ mutex_exit(&block->mutex);
+
block = UT_LIST_GET_PREV(flush_list, block);
}
}
@@ -966,10 +1000,14 @@ buf_flush_LRU_recommendation(void)
+ BUF_FLUSH_EXTRA_MARGIN)
&& (distance < BUF_LRU_FREE_SEARCH_LEN)) {
+ mutex_enter(&block->mutex);
+
if (buf_flush_ready_for_replace(block)) {
n_replaceable++;
}
+ mutex_exit(&block->mutex);
+
distance++;
block = UT_LIST_GET_PREV(LRU, block);