summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSergei Golubchik <sergii@pisem.net>2013-02-28 22:23:45 +0100
committerSergei Golubchik <sergii@pisem.net>2013-02-28 22:23:45 +0100
commita9f12c27c3b260f79ff0d28df8d2006ebea84b3a (patch)
tree00d26ab7a42dc16d331f81a8aebaa05109a56c90
parent37a5a54a01ab00fb334c89b6ed5e7dc6cef22fe5 (diff)
downloadmariadb-git-a9f12c27c3b260f79ff0d28df8d2006ebea84b3a.tar.gz
Percona-Server-5.5.29-rel30.0.tar.gz
-rw-r--r--btr/btr0btr.c58
-rw-r--r--btr/btr0cur.c26
-rw-r--r--buf/buf0buf.c9
-rw-r--r--buf/buf0flu.c16
-rw-r--r--buf/buf0lru.c134
-rw-r--r--buf/buf0rea.c10
-rw-r--r--dict/dict0dict.c45
-rw-r--r--fil/fil0fil.c80
-rw-r--r--handler/ha_innodb.cc103
-rw-r--r--handler/ha_innodb.h9
-rw-r--r--handler/handler0alter.cc8
-rw-r--r--handler/i_s.cc7
-rw-r--r--ibuf/ibuf0ibuf.c40
-rw-r--r--include/btr0cur.h2
-rw-r--r--include/buf0buf.h24
-rw-r--r--include/buf0buf.ic31
-rw-r--r--include/dict0dict.h12
-rw-r--r--include/dict0mem.h7
-rw-r--r--include/log0online.h44
-rw-r--r--include/os0file.h2
-rw-r--r--include/page0zip.h8
-rw-r--r--include/row0undo.h7
-rw-r--r--include/row0upd.ic3
-rw-r--r--include/srv0srv.h2
-rw-r--r--include/sync0sync.h2
-rw-r--r--include/univ.i18
-rw-r--r--lock/lock0lock.c12
-rw-r--r--log/log0online.c319
-rw-r--r--log/log0recv.c5
-rw-r--r--os/os0file.c17
-rw-r--r--page/page0cur.c7
-rw-r--r--page/page0page.c24
-rw-r--r--page/page0zip.c157
-rw-r--r--row/row0mysql.c5
-rw-r--r--row/row0sel.c27
-rw-r--r--row/row0umod.c53
-rw-r--r--row/row0undo.c19
-rw-r--r--srv/srv0srv.c38
-rw-r--r--srv/srv0start.c5
39 files changed, 1003 insertions, 392 deletions
diff --git a/btr/btr0btr.c b/btr/btr0btr.c
index 1113e05603c..8a6f094fc3e 100644
--- a/btr/btr0btr.c
+++ b/btr/btr0btr.c
@@ -1641,7 +1641,7 @@ btr_page_reorganize_low(
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
btr_assert_not_corrupted(block, index);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
data_size1 = page_get_data_size(page);
max_ins_size1 = page_get_max_insert_size_after_reorganize(page, 1);
@@ -1760,7 +1760,7 @@ btr_page_reorganize_low(
func_exit:
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
#ifndef UNIV_HOTBACKUP
buf_block_free(temp_block);
@@ -1835,7 +1835,7 @@ btr_page_empty(
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(page_zip == buf_block_get_page_zip(block));
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
btr_search_drop_page_hash_index(block);
@@ -1892,10 +1892,10 @@ btr_root_raise_and_insert(
root_block = btr_cur_get_block(cursor);
root_page_zip = buf_block_get_page_zip(root_block);
ut_ad(page_get_n_recs(root) > 0);
+ index = btr_cur_get_index(cursor);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!root_page_zip || page_zip_validate(root_page_zip, root));
+ ut_a(!root_page_zip || page_zip_validate(root_page_zip, root, index));
#endif /* UNIV_ZIP_DEBUG */
- index = btr_cur_get_index(cursor);
#ifdef UNIV_BTR_DEBUG
if (!dict_index_is_ibuf(index)) {
ulint space = dict_index_get_space(index);
@@ -2825,8 +2825,8 @@ insert_empty:
#ifdef UNIV_ZIP_DEBUG
if (UNIV_LIKELY_NULL(page_zip)) {
- ut_a(page_zip_validate(page_zip, page));
- ut_a(page_zip_validate(new_page_zip, new_page));
+ ut_a(page_zip_validate(page_zip, page, cursor->index));
+ ut_a(page_zip_validate(new_page_zip, new_page, cursor->index));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -2860,7 +2860,8 @@ insert_empty:
= buf_block_get_page_zip(insert_block);
ut_a(!insert_page_zip
- || page_zip_validate(insert_page_zip, insert_page));
+ || page_zip_validate(insert_page_zip, insert_page,
+ cursor->index));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -3225,7 +3226,7 @@ btr_lift_page_up(
btr_page_set_level(page, page_zip, page_level, mtr);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -3310,6 +3311,7 @@ btr_compress(
if (adjust) {
nth_rec = page_rec_get_n_recs_before(btr_cur_get_rec(cursor));
+ ut_ad(nth_rec > 0);
}
/* Decide the page to which we try to merge and which will inherit
@@ -3400,8 +3402,8 @@ err_exit:
const page_zip_des_t* page_zip
= buf_block_get_page_zip(block);
ut_a(page_zip);
- ut_a(page_zip_validate(merge_page_zip, merge_page));
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(merge_page_zip, merge_page, index));
+ ut_a(page_zip_validate(page_zip, page, index));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -3534,7 +3536,8 @@ err_exit:
ut_ad(page_validate(merge_page, index));
#ifdef UNIV_ZIP_DEBUG
- ut_a(!merge_page_zip || page_zip_validate(merge_page_zip, merge_page));
+ ut_a(!merge_page_zip || page_zip_validate(merge_page_zip, merge_page,
+ index));
#endif /* UNIV_ZIP_DEBUG */
/* Free the file page */
@@ -3545,6 +3548,7 @@ func_exit:
mem_heap_free(heap);
if (adjust) {
+ ut_ad(nth_rec > 0);
btr_cur_position(
index,
page_rec_get_nth(merge_block->frame, nth_rec),
@@ -3716,7 +3720,7 @@ btr_discard_page(
page_zip_des_t* merge_page_zip
= buf_block_get_page_zip(merge_block);
ut_a(!merge_page_zip
- || page_zip_validate(merge_page_zip, merge_page));
+ || page_zip_validate(merge_page_zip, merge_page, index));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -4058,8 +4062,22 @@ btr_index_page_validate(
{
page_cur_t cur;
ibool ret = TRUE;
+#ifndef DBUG_OFF
+ ulint nth = 1;
+#endif /* !DBUG_OFF */
page_cur_set_before_first(block, &cur);
+
+ /* Directory slot 0 should only contain the infimum record. */
+ DBUG_EXECUTE_IF("check_table_rec_next",
+ ut_a(page_rec_get_nth_const(
+ page_cur_get_page(&cur), 0)
+ == cur.rec);
+ ut_a(page_dir_slot_get_n_owned(
+ page_dir_get_nth_slot(
+ page_cur_get_page(&cur), 0))
+ == 1););
+
page_cur_move_to_next(&cur);
for (;;) {
@@ -4073,6 +4091,16 @@ btr_index_page_validate(
return(FALSE);
}
+ /* Verify that page_rec_get_nth_const() is correctly
+ retrieving each record. */
+ DBUG_EXECUTE_IF("check_table_rec_next",
+ ut_a(cur.rec == page_rec_get_nth_const(
+ page_cur_get_page(&cur),
+ page_rec_get_n_recs_before(
+ cur.rec)));
+ ut_a(nth++ == page_rec_get_n_recs_before(
+ cur.rec)););
+
page_cur_move_to_next(&cur);
}
@@ -4170,7 +4198,7 @@ btr_validate_level(
ut_a(space == page_get_space_id(page));
#ifdef UNIV_ZIP_DEBUG
page_zip = buf_block_get_page_zip(block);
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
ut_a(!page_is_leaf(page));
@@ -4198,7 +4226,7 @@ loop:
#ifdef UNIV_ZIP_DEBUG
page_zip = buf_block_get_page_zip(block);
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
/* Check ordering etc. of records */
diff --git a/btr/btr0cur.c b/btr/btr0cur.c
index 687853a422e..900da4f2449 100644
--- a/btr/btr0cur.c
+++ b/btr/btr0cur.c
@@ -748,7 +748,7 @@ retry_page_get:
#ifdef UNIV_ZIP_DEBUG
const page_zip_des_t* page_zip
= buf_block_get_page_zip(block);
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
buf_block_dbg_add_level(
@@ -2189,7 +2189,7 @@ any_extern:
page_zip = buf_block_get_page_zip(block);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
if (page_zip
@@ -2406,7 +2406,7 @@ btr_cur_pessimistic_update(
MTR_MEMO_X_LOCK));
ut_ad((thr && thr_get_trx(thr)->fake_changes) || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
/* The insert buffer tree should never be updated in place. */
ut_ad(!dict_index_is_ibuf(index));
@@ -2561,7 +2561,7 @@ make_external:
btr_search_update_hash_on_delete(cursor);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
page_cursor = btr_cur_get_page_cur(cursor);
@@ -2668,7 +2668,7 @@ make_external:
buf_block_t* rec_block = btr_cur_get_block(cursor);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
page = buf_block_get_frame(rec_block);
#endif /* UNIV_ZIP_DEBUG */
page_zip = buf_block_get_page_zip(rec_block);
@@ -2694,7 +2694,7 @@ make_external:
return_after_reservations:
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
if (n_extents > 0) {
@@ -3066,7 +3066,7 @@ btr_cur_set_deleted_flag_for_ibuf(
when the tablespace is
uncompressed */
ibool val, /*!< in: value to set */
- mtr_t* mtr) /*!< in: mtr */
+ mtr_t* mtr) /*!< in/out: mini-transaction */
{
/* We do not need to reserve btr_search_latch, as the page
has just been read to the buffer pool and there cannot be
@@ -3171,12 +3171,14 @@ btr_cur_optimistic_delete(
page, 1);
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip
+ || page_zip_validate(page_zip, page, cursor->index));
#endif /* UNIV_ZIP_DEBUG */
page_cur_delete_rec(btr_cur_get_page_cur(cursor),
cursor->index, offsets, mtr);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip
+ || page_zip_validate(page_zip, page, cursor->index));
#endif /* UNIV_ZIP_DEBUG */
if (dict_index_is_clust(cursor->index)
@@ -3273,7 +3275,7 @@ btr_cur_pessimistic_delete(
rec = btr_cur_get_rec(cursor);
page_zip = buf_block_get_page_zip(block);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
@@ -3283,7 +3285,7 @@ btr_cur_pessimistic_delete(
rec, offsets, page_zip,
rb_ctx, mtr);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -3344,7 +3346,7 @@ btr_cur_pessimistic_delete(
page_cur_delete_rec(btr_cur_get_page_cur(cursor), index, offsets, mtr);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
ut_ad(btr_check_node_ptr(index, block, mtr));
diff --git a/buf/buf0buf.c b/buf/buf0buf.c
index bbc1042ca78..d15cbac325a 100644
--- a/buf/buf0buf.c
+++ b/buf/buf0buf.c
@@ -279,7 +279,7 @@ the read requests for the whole area.
#ifndef UNIV_HOTBACKUP
/** Value in microseconds */
-static const int WAIT_FOR_READ = 5000;
+static const int WAIT_FOR_READ = 100;
/** Number of attempts made to read in a page in the buffer pool */
static const ulint BUF_PAGE_READ_MAX_RETRIES = 100;
@@ -2928,8 +2928,9 @@ wait_until_unfixed:
mutex_exit(&block->mutex);
if (io_fix == BUF_IO_READ) {
-
- os_thread_sleep(WAIT_FOR_READ);
+ /* wait by temporarily taking an s-latch */
+ rw_lock_s_lock(&(block->lock));
+ rw_lock_s_unlock(&(block->lock));
} else {
break;
}
@@ -3937,7 +3938,7 @@ buf_page_io_complete(
ensures that this is the only thread that handles the i/o for this
block. */
- io_type = buf_page_get_io_fix(bpage);
+ io_type = buf_page_get_io_fix_unlocked(bpage);
ut_ad(io_type == BUF_IO_READ || io_type == BUF_IO_WRITE);
if (io_type == BUF_IO_READ) {
diff --git a/buf/buf0flu.c b/buf/buf0flu.c
index 39351cd3678..df39241d91c 100644
--- a/buf/buf0flu.c
+++ b/buf/buf0flu.c
@@ -915,7 +915,7 @@ flush:
"InnoDB: Page buf fix count %lu,"
" io fix %lu, state %lu\n",
(ulong)block->page.buf_fix_count,
- (ulong)buf_block_get_io_fix(block),
+ (ulong)buf_block_get_io_fix_unlocked(block),
(ulong)buf_block_get_state(block));
}
@@ -1115,7 +1115,7 @@ buf_flush_write_block_low(
ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
ut_ad(!buf_flush_list_mutex_own(buf_pool));
ut_ad(!mutex_own(buf_page_get_mutex(bpage)));
- ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_WRITE);
+ ut_ad(buf_page_get_io_fix_unlocked(bpage) == BUF_IO_WRITE);
ut_ad(bpage->oldest_modification != 0);
#ifdef UNIV_IBUF_COUNT_DEBUG
@@ -1181,10 +1181,10 @@ buf_flush_write_block_low(
# if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
/********************************************************************//**
Writes a flushable page asynchronously from the buffer pool to a file.
-NOTE: buf_pool->mutex and block->mutex must be held upon entering this
-function, and they will be released by this function after flushing.
+NOTE: block->mutex must be held upon entering this function, and it will be
+released by this function after flushing.
This is loosely based on buf_flush_batch() and buf_flush_page().
-@return TRUE if the page was flushed and the mutexes released */
+@return TRUE if the page was flushed and the mutex released */
UNIV_INTERN
ibool
buf_flush_page_try(
@@ -1553,16 +1553,14 @@ scan:
Check if the block is modified and ready for flushing. If the block
is ready to flush then flush the page and try to flush its neighbors.
-@return TRUE if buf_pool mutex was not released during this function.
+@return TRUE if LRU list mutex was not released during this function.
This does not guarantee that some pages were written as well.
Number of pages written are incremented to the count. */
static
ibool
buf_flush_page_and_try_neighbors(
/*=============================*/
- buf_page_t* bpage, /*!< in: buffer control block,
- must be
- buf_page_in_file(bpage) */
+ buf_page_t* bpage, /*!< in: buffer control block */
enum buf_flush flush_type, /*!< in: BUF_FLUSH_LRU
or BUF_FLUSH_LIST */
ulint n_to_flush, /*!< in: number of pages to
diff --git a/buf/buf0lru.c b/buf/buf0lru.c
index 9f221e3eb82..2954c22b5ce 100644
--- a/buf/buf0lru.c
+++ b/buf/buf0lru.c
@@ -393,18 +393,18 @@ buf_flush_yield(
{
mutex_t* block_mutex;
+ block_mutex = buf_page_get_mutex(bpage);
+
+ ut_ad(mutex_own(block_mutex));
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
ut_ad(buf_page_in_file(bpage));
- block_mutex = buf_page_get_mutex(bpage);
-
- mutex_enter(block_mutex);
/* "Fix" the block so that the position cannot be
changed after we release the buffer pool and
block mutexes. */
buf_page_set_sticky(bpage);
- /* Now it is safe to release the buf_pool->mutex. */
+ /* Now it is safe to release the LRU list mutex. */
mutex_exit(&buf_pool->LRU_list_mutex);
mutex_exit(block_mutex);
@@ -415,7 +415,7 @@ buf_flush_yield(
mutex_enter(block_mutex);
/* "Unfix" the block now that we have both the
- buffer pool and block mutex again. */
+ LRU list and block mutex again. */
buf_page_unset_sticky(bpage);
mutex_exit(block_mutex);
}
@@ -431,7 +431,9 @@ buf_flush_try_yield(
/*================*/
buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
buf_page_t* bpage, /*!< in/out: bpage to remove */
- ulint processed) /*!< in: number of pages processed */
+ ulint processed, /*!< in: number of pages processed */
+ ibool* must_restart) /*!< in/out: if TRUE, we have to
+ restart the flush list scan */
{
/* Every BUF_LRU_DROP_SEARCH_SIZE iterations in the
loop we release buf_pool->mutex to let other threads
@@ -441,10 +443,40 @@ buf_flush_try_yield(
if (bpage != NULL
&& processed >= BUF_LRU_DROP_SEARCH_SIZE
- && buf_page_get_io_fix(bpage) == BUF_IO_NONE) {
+ && buf_page_get_io_fix_unlocked(bpage) == BUF_IO_NONE) {
+
+ mutex_t* block_mutex;
buf_flush_list_mutex_exit(buf_pool);
+ /* We don't have to worry about bpage becoming a dangling
+ pointer by a compressed page flush list relocation because
+ buf_page_get_gen() won't be called for pages from this
+ tablespace. */
+
+ block_mutex = buf_page_get_mutex_enter(bpage);
+ if (UNIV_UNLIKELY(block_mutex == NULL)) {
+
+ buf_flush_list_mutex_enter(buf_pool);
+
+ *must_restart = TRUE;
+ return FALSE;
+ }
+
+ /* Recheck the I/O fix and the flush list presence now that we
+ hold the right mutex */
+ if (UNIV_UNLIKELY(buf_page_get_io_fix(bpage) != BUF_IO_NONE
+ || bpage->oldest_modification == 0)) {
+
+ mutex_exit(block_mutex);
+ buf_flush_list_mutex_enter(buf_pool);
+
+ *must_restart = TRUE;
+ return FALSE;
+ }
+
+ *must_restart = FALSE;
+
/* Release the LRU list and block mutex
to give the other threads a go. */
@@ -473,7 +505,9 @@ ibool
buf_flush_or_remove_page(
/*=====================*/
buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
- buf_page_t* bpage) /*!< in/out: bpage to remove */
+ buf_page_t* bpage, /*!< in/out: bpage to remove */
+ ibool* must_restart) /*!< in/out: if TRUE, must restart the
+ flush list scan */
{
mutex_t* block_mutex;
ibool processed = FALSE;
@@ -487,7 +521,8 @@ buf_flush_or_remove_page(
buf_pool->mutex and block_mutex. It is safe to check
them while holding buf_pool->mutex only. */
- if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
+ if (UNIV_UNLIKELY(buf_page_get_io_fix_unlocked(bpage)
+ != BUF_IO_NONE)) {
/* We cannot remove this page during this scan
yet; maybe the system is currently reading it
@@ -496,21 +531,38 @@ buf_flush_or_remove_page(
} else {
/* We have to release the flush_list_mutex to obey the
- latching order. We are however guaranteed that the page
- will stay in the flush_list because buf_flush_remove()
- needs buf_pool->mutex as well (for the non-flush case). */
+ latching order. We are not however guaranteed that the page
+ will stay in the flush_list. */
buf_flush_list_mutex_exit(buf_pool);
+ /* We don't have to worry about bpage becoming a dangling
+ pointer by a compressed page flush list relocation because
+ buf_page_get_gen() won't be called for pages from this
+ tablespace. */
+
mutex_enter(block_mutex);
- ut_ad(bpage->oldest_modification != 0);
+ /* Recheck the page I/O fix and the flush list presence now
+ that we hold the right mutex. */
+ if (UNIV_UNLIKELY(buf_page_get_io_fix(bpage) != BUF_IO_NONE
+ || bpage->oldest_modification == 0)) {
- if (bpage->buf_fix_count == 0) {
+ /* The page became I/O-fixed or is not on the flush
+ list anymore, this invalidates any flush-list-page
+ pointers we have. */
+ *must_restart = TRUE;
- buf_flush_remove(bpage);
+ } else {
+
+ ut_ad(bpage->oldest_modification != 0);
- processed = TRUE;
+ if (bpage->buf_fix_count == 0) {
+
+ buf_flush_remove(bpage);
+
+ processed = TRUE;
+ }
}
mutex_exit(block_mutex);
@@ -541,11 +593,12 @@ buf_flush_or_remove_pages(
buf_page_t* bpage;
ulint processed = 0;
ibool all_freed = TRUE;
+ ibool must_restart = FALSE;
buf_flush_list_mutex_enter(buf_pool);
for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
- bpage != NULL;
+ !must_restart && bpage != NULL;
bpage = prev) {
ut_a(buf_page_in_file(bpage));
@@ -561,22 +614,31 @@ buf_flush_or_remove_pages(
/* Skip this block, as it does not belong to
the target space. */
- } else if (!buf_flush_or_remove_page(buf_pool, bpage)) {
+ } else if (!buf_flush_or_remove_page(buf_pool, bpage,
+ &must_restart)) {
/* Remove was unsuccessful, we have to try again
by scanning the entire list from the end. */
all_freed = FALSE;
}
+ if (UNIV_UNLIKELY(must_restart)) {
+ ut_ad(!all_freed);
+ break;
+ }
++processed;
/* Yield if we have hogged the CPU and mutexes for too long. */
- if (buf_flush_try_yield(buf_pool, prev, processed)) {
+ if (buf_flush_try_yield(buf_pool, prev, processed,
+ &must_restart)) {
+ ut_ad(!must_restart);
/* Reset the batch size counter if we had to yield. */
processed = 0;
+ } else if (UNIV_UNLIKELY(must_restart)) {
+ all_freed = FALSE;
}
}
@@ -641,41 +703,39 @@ scan_again:
/* No op */) {
buf_page_t* prev_bpage;
- mutex_t* block_mutex = NULL;
+ mutex_t* block_mutex;
ut_a(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
- /* bpage->space and bpage->io_fix are protected by
- buf_pool->mutex and the block_mutex. It is safe to check
- them while holding buf_pool->mutex only. */
+ block_mutex = buf_page_get_mutex_enter(bpage);
+
+ if (!block_mutex) {
+ /* It may be impossible case...
+ Something wrong, so will be scan_again */
+
+ all_freed = FALSE;
+ goto next_page;
+ }
if (buf_page_get_space(bpage) != id) {
/* Skip this block, as it does not belong to
the space that is being invalidated. */
+
+ mutex_exit(block_mutex);
goto next_page;
} else if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
/* We cannot remove this page during this scan
yet; maybe the system is currently reading it
in, or flushing the modifications to the file */
+ mutex_exit(block_mutex);
all_freed = FALSE;
goto next_page;
} else {
- block_mutex = buf_page_get_mutex_enter(bpage);
-
- if (!block_mutex) {
- /* It may be impossible case...
- Something wrong, so will be scan_again */
-
- all_freed = FALSE;
- goto next_page;
- }
-
-
if (bpage->buf_fix_count > 0) {
mutex_exit(block_mutex);
@@ -1853,7 +1913,7 @@ alloc:
|| !buf_page_can_relocate(bpage)) {
not_freed:
if (b) {
- buf_buddy_free(buf_pool, b, sizeof *b, TRUE);
+ buf_page_free_descriptor(b);
}
if (!have_LRU_mutex)
mutex_exit(&buf_pool->LRU_list_mutex);
@@ -2196,7 +2256,9 @@ buf_LRU_block_remove_hashed_page(
break;
case FIL_PAGE_INDEX:
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(&bpage->zip, page));
+ ut_a(page_zip_validate(
+ &bpage->zip, page,
+ ((buf_block_t*) bpage)->index));
#endif /* UNIV_ZIP_DEBUG */
break;
default:
diff --git a/buf/buf0rea.c b/buf/buf0rea.c
index d41da6d8636..cf0a029df92 100644
--- a/buf/buf0rea.c
+++ b/buf/buf0rea.c
@@ -220,7 +220,10 @@ not_to_recover:
ut_ad(buf_page_in_file(bpage));
- thd_wait_begin(NULL, THD_WAIT_DISKIO);
+ if(sync) {
+ thd_wait_begin(NULL, THD_WAIT_DISKIO);
+ }
+
if (zip_size) {
*err = _fil_io(OS_FILE_READ | wake_later,
sync, space, zip_size, offset, 0, zip_size,
@@ -232,7 +235,10 @@ not_to_recover:
sync, space, 0, offset, 0, UNIV_PAGE_SIZE,
((buf_block_t*) bpage)->frame, bpage, trx);
}
- thd_wait_end(NULL);
+
+ if (sync) {
+ thd_wait_end(NULL);
+ }
if (*err == DB_TABLESPACE_DELETED) {
buf_read_page_handle_error(bpage);
diff --git a/dict/dict0dict.c b/dict/dict0dict.c
index 89f13188d95..461bdd55ad3 100644
--- a/dict/dict0dict.c
+++ b/dict/dict0dict.c
@@ -488,10 +488,12 @@ Looks for column n in an index.
ULINT_UNDEFINED if not contained */
UNIV_INTERN
ulint
-dict_index_get_nth_col_pos(
-/*=======================*/
- const dict_index_t* index, /*!< in: index */
- ulint n) /*!< in: column number */
+dict_index_get_nth_col_or_prefix_pos(
+/*=================================*/
+ const dict_index_t* index, /*!< in: index */
+ ulint n, /*!< in: column number */
+ ibool inc_prefix) /*!< in: TRUE=consider
+ column prefixes too */
{
const dict_field_t* field;
const dict_col_t* col;
@@ -513,7 +515,8 @@ dict_index_get_nth_col_pos(
for (pos = 0; pos < n_fields; pos++) {
field = dict_index_get_nth_field(index, pos);
- if (col == field->col && field->prefix_len == 0) {
+ if (col == field->col
+ && (inc_prefix || field->prefix_len == 0)) {
return(pos);
}
@@ -522,6 +525,20 @@ dict_index_get_nth_col_pos(
return(ULINT_UNDEFINED);
}
+/********************************************************************//**
+Looks for column n in an index.
+@return position in internal representation of the index;
+ULINT_UNDEFINED if not contained */
+UNIV_INTERN
+ulint
+dict_index_get_nth_col_pos(
+/*=======================*/
+ const dict_index_t* index, /*!< in: index */
+ ulint n) /*!< in: column number */
+{
+ return(dict_index_get_nth_col_or_prefix_pos(index, n, FALSE));
+}
+
#ifndef UNIV_HOTBACKUP
/********************************************************************//**
Returns TRUE if the index contains a column or a prefix of that column.
@@ -2085,7 +2102,6 @@ dict_index_build_internal_clust(
{
dict_index_t* new_index;
dict_field_t* field;
- ulint fixed_size;
ulint trx_id_pos;
ulint i;
ibool* indexed;
@@ -2162,7 +2178,7 @@ dict_index_build_internal_clust(
for (i = 0; i < trx_id_pos; i++) {
- fixed_size = dict_col_get_fixed_size(
+ ulint fixed_size = dict_col_get_fixed_size(
dict_index_get_nth_col(new_index, i),
dict_table_is_comp(table));
@@ -2179,7 +2195,20 @@ dict_index_build_internal_clust(
break;
}
- new_index->trx_id_offset += (unsigned int) fixed_size;
+ /* Add fixed_size to new_index->trx_id_offset.
+ Because the latter is a bit-field, an overflow
+ can theoretically occur. Check for it. */
+ fixed_size += new_index->trx_id_offset;
+
+ new_index->trx_id_offset = fixed_size;
+
+ if (new_index->trx_id_offset != fixed_size) {
+ /* Overflow. Pretend that this is a
+ variable-length PRIMARY KEY. */
+ ut_ad(0);
+ new_index->trx_id_offset = 0;
+ break;
+ }
}
}
diff --git a/fil/fil0fil.c b/fil/fil0fil.c
index 811b7583d50..f8d52009b7e 100644
--- a/fil/fil0fil.c
+++ b/fil/fil0fil.c
@@ -195,14 +195,16 @@ struct fil_space_struct {
requests on the file */
ibool stop_new_ops;
/*!< we set this TRUE when we start
- deleting a single-table tablespace */
- ibool is_being_deleted;
- /*!< this is set to TRUE when we start
- deleting a single-table tablespace and its
- file; when this flag is set no further i/o
- or flush requests can be placed on this space,
- though there may be such requests still being
- processed on this space */
+ deleting a single-table tablespace.
+ When this is set following new ops
+ are not allowed:
+ * read IO request
+ * ibuf merge
+ * file flush
+ Note that we can still possibly have
+ new write operations because we don't
+ check this flag when doing flush
+ batches. */
ulint purpose;/*!< FIL_TABLESPACE, FIL_LOG, or
FIL_ARCH_LOG */
UT_LIST_BASE_NODE_T(fil_node_t) chain;
@@ -865,7 +867,7 @@ fil_node_close_file(
ut_ad(node && system);
ut_ad(mutex_own(&(system->mutex)));
ut_a(node->open);
- ut_a(node->n_pending == 0 || node->space->is_being_deleted);
+ ut_a(node->n_pending == 0 || node->space->stop_new_ops);
ut_a(node->n_pending_flushes == 0);
#ifndef UNIV_HOTBACKUP
ut_a(node->modification_counter == node->flush_counter
@@ -1099,7 +1101,7 @@ fil_node_free(
ut_ad(node && system && space);
ut_ad(mutex_own(&(system->mutex)));
ut_a(node->magic_n == FIL_NODE_MAGIC_N);
- ut_a(node->n_pending == 0 || space->is_being_deleted);
+ ut_a(node->n_pending == 0 || space->stop_new_ops);
if (node->open) {
/* We fool the assertion in fil_node_close_file() to think
@@ -1297,7 +1299,6 @@ try_again:
space->stop_ios = FALSE;
space->stop_new_ops = FALSE;
- space->is_being_deleted = FALSE;
space->purpose = purpose;
space->size = 0;
space->flags = flags;
@@ -2325,11 +2326,9 @@ try_again:
return(FALSE);
}
- ut_a(space);
+ ut_a(space->stop_new_ops);
ut_a(space->n_pending_ops == 0);
- space->is_being_deleted = TRUE;
-
ut_a(UT_LIST_GET_LEN(space->chain) == 1);
node = UT_LIST_GET_FIRST(space->chain);
@@ -2372,12 +2371,26 @@ try_again:
rw_lock_x_lock(&space->latch);
#ifndef UNIV_HOTBACKUP
- /* Invalidate in the buffer pool all pages belonging to the
- tablespace. Since we have set space->is_being_deleted = TRUE, readahead
- or ibuf merge can no longer read more pages of this tablespace to the
- buffer pool. Thus we can clean the tablespace out of the buffer pool
- completely and permanently. The flag is_being_deleted also prevents
- fil_flush() from being applied to this tablespace. */
+ /* IMPORTANT: Because we have set space::stop_new_ops there
+ can't be any new ibuf merges, reads or flushes. We are here
+ because node::n_pending was zero above. However, it is still
+ possible to have pending read and write requests:
+
+ A read request can happen because the reader thread has
+ gone through the ::stop_new_ops check in buf_page_init_for_read()
+ before the flag was set and has not yet incremented ::n_pending
+ when we checked it above.
+
+ A write request can be issued any time because we don't check
+ the ::stop_new_ops flag when queueing a block for write.
+
+ We deal with pending write requests in the following function
+ where we'd minimally evict all dirty pages belonging to this
+ space from the flush_list. Note that if a block is IO-fixed
+ we'll wait for IO to complete.
+
+ To deal with potential read requests, we check the
+ ::stop_new_ops flag in fil_io() */
if (srv_lazy_drop_table) {
buf_LRU_mark_space_was_deleted(id);
@@ -2393,6 +2406,15 @@ try_again:
mutex_enter(&fil_system->mutex);
+ /* Double check the sanity of pending ops after reacquiring
+ the fil_system::mutex. */
+ if (fil_space_get_by_id(id)) {
+ ut_a(space->n_pending_ops == 0);
+ ut_a(UT_LIST_GET_LEN(space->chain) == 1);
+ node = UT_LIST_GET_FIRST(space->chain);
+ ut_a(node->n_pending == 0);
+ }
+
success = fil_space_free(id, TRUE);
mutex_exit(&fil_system->mutex);
@@ -2450,7 +2472,7 @@ fil_tablespace_is_being_deleted(
ut_a(space != NULL);
- is_being_deleted = space->is_being_deleted;
+ is_being_deleted = space->stop_new_ops;
mutex_exit(&fil_system->mutex);
@@ -4531,7 +4553,7 @@ fil_tablespace_deleted_or_being_deleted_in_mem(
space = fil_space_get_by_id(id);
- if (space == NULL || space->is_being_deleted) {
+ if (space == NULL || space->stop_new_ops) {
mutex_exit(&fil_system->mutex);
return(TRUE);
@@ -5271,7 +5293,9 @@ _fil_io(
space = fil_space_get_by_id(space_id);
- if (!space) {
+ /* If we are deleting a tablespace we don't allow any read
+ operations on that. However, we do allow write operations. */
+ if (!space || (type == OS_FILE_READ && space->stop_new_ops)) {
mutex_exit(&fil_system->mutex);
ut_print_timestamp(stderr);
@@ -5362,8 +5386,8 @@ _fil_io(
/* Do aio */
- ut_a(byte_offset % OS_FILE_LOG_BLOCK_SIZE == 0);
- ut_a((len % OS_FILE_LOG_BLOCK_SIZE) == 0);
+ ut_a(byte_offset % OS_MIN_LOG_BLOCK_SIZE == 0);
+ ut_a((len % OS_MIN_LOG_BLOCK_SIZE) == 0);
if (srv_pass_corrupt_table == 1 && space->is_corrupt) {
/* should ignore i/o for the crashed space */
@@ -5551,7 +5575,7 @@ fil_aio_wait(
&& ((buf_page_t*)message)->space_was_being_deleted) {
/* intended not to be uncompress read page */
- ut_a(buf_page_get_io_fix(message) == BUF_IO_WRITE
+ ut_a(buf_page_get_io_fix_unlocked(message) == BUF_IO_WRITE
|| !buf_page_get_zip_size(message)
|| buf_page_get_state(message) != BUF_BLOCK_FILE_PAGE);
@@ -5612,7 +5636,7 @@ fil_flush(
space = fil_space_get_by_id(space_id);
- if (!space || space->is_being_deleted) {
+ if (!space || space->stop_new_ops) {
mutex_exit(&fil_system->mutex);
return;
@@ -5743,7 +5767,7 @@ fil_flush_file_spaces(
space;
space = UT_LIST_GET_NEXT(unflushed_spaces, space)) {
- if (space->purpose == purpose && !space->is_being_deleted) {
+ if (space->purpose == purpose && !space->stop_new_ops) {
space_ids[n_space_ids++] = space->id;
}
diff --git a/handler/ha_innodb.cc b/handler/ha_innodb.cc
index 2d476d1bdf5..f87f9138c0f 100644
--- a/handler/ha_innodb.cc
+++ b/handler/ha_innodb.cc
@@ -85,6 +85,7 @@ extern "C" {
#include "row0sel.h"
#include "row0upd.h"
#include "log0log.h"
+#include "log0online.h"
#include "lock0lock.h"
#include "dict0crea.h"
#include "btr0cur.h"
@@ -297,6 +298,7 @@ static PSI_mutex_info all_innodb_mutexes[] = {
{&ibuf_pessimistic_insert_mutex_key,
"ibuf_pessimistic_insert_mutex", 0},
{&kernel_mutex_key, "kernel_mutex", 0},
+ {&log_bmp_sys_mutex_key, "log_bmp_sys_mutex", 0},
{&log_sys_mutex_key, "log_sys_mutex", 0},
# ifdef UNIV_MEM_DEBUG
{&mem_hash_mutex_key, "mem_hash_mutex", 0},
@@ -438,6 +440,25 @@ uint
innobase_alter_table_flags(
/*=======================*/
uint flags);
+/************************************************************//**
+Synchronously read and parse the redo log up to the last
+checkpoint to write the changed page bitmap.
+@return 0 to indicate success. Current implementation cannot fail. */
+static
+my_bool
+innobase_flush_changed_page_bitmaps();
+/*==================================*/
+/************************************************************//**
+Delete all the bitmap files for data less than the specified LSN.
+If called with lsn == 0 (i.e. set by RESET request) or
+IB_ULONGLONG_MAX, restart the bitmap file sequence, otherwise
+continue it.
+@return 0 to indicate success, 1 for failure. */
+static
+my_bool
+innobase_purge_changed_page_bitmaps(
+/*================================*/
+ ulonglong lsn); /*!< in: LSN to purge files up to */
static const char innobase_hton_name[]= "InnoDB";
@@ -1213,11 +1234,23 @@ convert_error_code_to_mysql(
case DB_TABLE_NOT_FOUND:
return(HA_ERR_NO_SUCH_TABLE);
- case DB_TOO_BIG_RECORD:
- my_error(ER_TOO_BIG_ROWSIZE, MYF(0),
- page_get_free_space_of_empty(flags
- & DICT_TF_COMPACT) / 2);
+ case DB_TOO_BIG_RECORD: {
+ /* If prefix is true then a 768-byte prefix is stored
+ locally for BLOB fields. Refer to dict_table_get_format() */
+ bool prefix = ((flags & DICT_TF_FORMAT_MASK)
+ >> DICT_TF_FORMAT_SHIFT) < UNIV_FORMAT_B;
+ my_printf_error(ER_TOO_BIG_ROWSIZE,
+ "Row size too large (> %lu). Changing some columns "
+ "to TEXT or BLOB %smay help. In current row "
+ "format, BLOB prefix of %d bytes is stored inline.",
+ MYF(0),
+ page_get_free_space_of_empty(flags &
+ DICT_TF_COMPACT) / 2,
+ prefix ? "or using ROW_FORMAT=DYNAMIC "
+ "or ROW_FORMAT=COMPRESSED ": "",
+ prefix ? DICT_MAX_FIXED_COL_LEN : 0);
return(HA_ERR_TO_BIG_ROW);
+ }
case DB_TOO_BIG_INDEX_COL:
my_error(ER_INDEX_COLUMN_TOO_LONG, MYF(0),
@@ -1678,19 +1711,19 @@ innobase_next_autoinc(
ut_a(block > 0);
ut_a(max_value > 0);
- /* Current value should never be greater than the maximum. */
- ut_a(current <= max_value);
-
/* According to MySQL documentation, if the offset is greater than
the step then the offset is ignored. */
if (offset > block) {
offset = 0;
}
- /* Check for overflow. */
+ /* Check for overflow. Current can be > max_value if the value is
+ in reality a negative value.The visual studio compilers converts
+ large double values automatically into unsigned long long datatype
+ maximum value */
if (block >= max_value
|| offset > max_value
- || current == max_value
+ || current >= max_value
|| max_value - offset <= offset) {
next_value = max_value;
@@ -1767,7 +1800,7 @@ innobase_trx_init(
trx->fake_changes = THDVAR(thd, fake_changes);
#ifdef EXTENDED_SLOWLOG
- if (thd_log_slow_verbosity(thd) & SLOG_V_INNODB) {
+ if (thd_log_slow_verbosity(thd) & (1ULL << SLOG_V_INNODB)) {
trx->take_stats = TRUE;
} else {
trx->take_stats = FALSE;
@@ -2651,6 +2684,10 @@ innobase_init(
innobase_hton->flags=HTON_NO_FLAGS;
innobase_hton->release_temporary_latches=innobase_release_temporary_latches;
innobase_hton->alter_table_flags = innobase_alter_table_flags;
+ innobase_hton->flush_changed_page_bitmaps
+ = innobase_flush_changed_page_bitmaps;
+ innobase_hton->purge_changed_page_bitmaps
+ = innobase_purge_changed_page_bitmaps;
ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR);
@@ -2721,6 +2758,7 @@ innobase_init(
} else {
srv_log_block_size = 512;
}
+ ut_ad (srv_log_block_size >= OS_MIN_LOG_BLOCK_SIZE);
if (!srv_log_block_size) {
fprintf(stderr,
@@ -3270,6 +3308,36 @@ innobase_alter_table_flags(
| HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE);
}
+/************************************************************//**
+Synchronously read and parse the redo log up to the last
+checkpoint to write the changed page bitmap.
+@return 0 to indicate success. Current implementation cannot fail. */
+static
+my_bool
+innobase_flush_changed_page_bitmaps()
+/*=================================*/
+{
+ if (srv_track_changed_pages) {
+ os_event_reset(srv_checkpoint_completed_event);
+ log_online_follow_redo_log();
+ }
+ return FALSE;
+}
+
+/************************************************************//**
+Delete all the bitmap files for data less than the specified LSN.
+If called with lsn == IB_ULONGLONG_MAX (i.e. set by RESET request),
+restart the bitmap file sequence, otherwise continue it.
+@return 0 to indicate success, 1 for failure. */
+static
+my_bool
+innobase_purge_changed_page_bitmaps(
+/*================================*/
+ ulonglong lsn) /*!< in: LSN to purge files up to */
+{
+ return (my_bool)log_online_purge_changed_page_bitmaps(lsn);
+}
+
/****************************************************************//**
Copy the current replication position from MySQL to a transaction. */
static
@@ -6928,7 +6996,7 @@ ha_innobase::change_active_index(
"InnoDB: Index %s for table %s is"
" marked as corrupted",
index_name, table_name);
- DBUG_RETURN(1);
+ DBUG_RETURN(HA_ERR_INDEX_CORRUPT);
} else {
push_warning_printf(
user_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
@@ -6939,7 +7007,7 @@ ha_innobase::change_active_index(
/* The caller seems to ignore this. Thus, we must check
this again in row_search_for_mysql(). */
- DBUG_RETURN(2);
+ DBUG_RETURN(HA_ERR_TABLE_DEF_CHANGED);
}
ut_a(prebuilt->search_tuple != 0);
@@ -12389,7 +12457,8 @@ static MYSQL_SYSVAR_ULONG(page_size, innobase_page_size,
static MYSQL_SYSVAR_ULONG(log_block_size, innobase_log_block_size,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"###EXPERIMENTAL###: The log block size of the transaction log file. Changing for created log file is not supported. Use on your own risk!",
- NULL, NULL, (1 << 9)/*512*/, (1 << 9)/*512*/, (1 << UNIV_PAGE_SIZE_SHIFT_MAX), 0);
+ NULL, NULL, (1 << 9)/*512*/, OS_MIN_LOG_BLOCK_SIZE,
+ (1 << UNIV_PAGE_SIZE_SHIFT_MAX), 0);
static MYSQL_SYSVAR_STR(data_home_dir, innobase_data_home_dir,
PLUGIN_VAR_READONLY,
@@ -12813,7 +12882,7 @@ static MYSQL_SYSVAR_ULONGLONG(max_bitmap_file_size, srv_max_bitmap_file_size,
"The maximum size of changed page bitmap files",
NULL, NULL, 100*1024*1024ULL, 4096ULL, ULONGLONG_MAX, 0);
-static MYSQL_SYSVAR_ULONGLONG(changed_pages_limit, srv_changed_pages_limit,
+static MYSQL_SYSVAR_ULONGLONG(max_changed_pages, srv_max_changed_pages,
PLUGIN_VAR_RQCMDARG,
"The maximum number of rows for "
"INFORMATION_SCHEMA.INNODB_CHANGED_PAGES table, "
@@ -12823,8 +12892,8 @@ static MYSQL_SYSVAR_ULONGLONG(changed_pages_limit, srv_changed_pages_limit,
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
static MYSQL_SYSVAR_UINT(change_buffering_debug, ibuf_debug,
PLUGIN_VAR_RQCMDARG,
- "Debug flags for InnoDB change buffering (0=none)",
- NULL, NULL, 0, 0, 1, 0);
+ "Debug flags for InnoDB change buffering (0=none, 2=crash at merge)",
+ NULL, NULL, 0, 0, 2, 0);
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
static MYSQL_SYSVAR_BOOL(random_read_ahead, srv_random_read_ahead,
@@ -13114,7 +13183,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(change_buffering),
MYSQL_SYSVAR(track_changed_pages),
MYSQL_SYSVAR(max_bitmap_file_size),
- MYSQL_SYSVAR(changed_pages_limit),
+ MYSQL_SYSVAR(max_changed_pages),
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
MYSQL_SYSVAR(change_buffering_debug),
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
diff --git a/handler/ha_innodb.h b/handler/ha_innodb.h
index 7645f537f33..d52e0fa74aa 100644
--- a/handler/ha_innodb.h
+++ b/handler/ha_innodb.h
@@ -81,12 +81,13 @@ class ha_innobase: public handler
uchar* upd_buf; /*!< buffer used in updates */
ulint upd_buf_size; /*!< the size of upd_buf in bytes */
- uchar srch_key_val1[REC_VERSION_56_MAX_INDEX_COL_LEN + 2];
- uchar srch_key_val2[REC_VERSION_56_MAX_INDEX_COL_LEN + 2];
+ uchar srch_key_val1[MAX_KEY_LENGTH + MAX_REF_PARTS*2];
+ uchar srch_key_val2[MAX_KEY_LENGTH + MAX_REF_PARTS*2];
/*!< buffers used in converting
search key values from MySQL format
- to InnoDB format. "+ 2" for the two
- bytes where the length is stored */
+ to InnoDB format. For each column
+ 2 bytes are used to store length,
+ hence MAX_REF_PARTS*2. */
Table_flags int_table_flags;
uint primary_key;
ulong start_of_scan; /*!< this is set to 1 when we are
diff --git a/handler/handler0alter.cc b/handler/handler0alter.cc
index 46ffc28180d..65295a12c19 100644
--- a/handler/handler0alter.cc
+++ b/handler/handler0alter.cc
@@ -112,13 +112,17 @@ innobase_col_to_mysql(
/* These column types should never be shipped to MySQL. */
ut_ad(0);
- case DATA_CHAR:
case DATA_FIXBINARY:
case DATA_FLOAT:
case DATA_DOUBLE:
case DATA_DECIMAL:
/* Above are the valid column types for MySQL data. */
ut_ad(flen == len);
+ /* fall through */
+ case DATA_CHAR:
+ /* We may have flen > len when there is a shorter
+ prefix on a CHAR column. */
+ ut_ad(flen >= len);
#else /* UNIV_DEBUG */
default:
#endif /* UNIV_DEBUG */
@@ -151,7 +155,7 @@ innobase_rec_to_mysql(
field->reset();
- ipos = dict_index_get_nth_col_pos(index, i);
+ ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE);
if (UNIV_UNLIKELY(ipos == ULINT_UNDEFINED)) {
null_field:
diff --git a/handler/i_s.cc b/handler/i_s.cc
index 8a95840ddaf..4a1a3df1d5d 100644
--- a/handler/i_s.cc
+++ b/handler/i_s.cc
@@ -168,7 +168,8 @@ do { \
} \
} while (0)
-#if !defined __STRICT_ANSI__ && defined __GNUC__ && (__GNUC__) > 2 && !defined __INTEL_COMPILER
+#if !defined __STRICT_ANSI__ && defined __GNUC__ && (__GNUC__) > 2 && \
+ !defined __INTEL_COMPILER && !defined __clang__
#define STRUCT_FLD(name, value) name: value
#else
#define STRUCT_FLD(name, value) value
@@ -7496,8 +7497,8 @@ i_s_innodb_changed_pages_fill(
}
while(log_online_bitmap_iterator_next(&i) &&
- (!srv_changed_pages_limit ||
- output_rows_num < srv_changed_pages_limit) &&
+ (!srv_max_changed_pages ||
+ output_rows_num < srv_max_changed_pages) &&
/*
There is no need to compare both start LSN and end LSN fields
with maximum value. It's enough to compare only start LSN.
diff --git a/ibuf/ibuf0ibuf.c b/ibuf/ibuf0ibuf.c
index 77305e42fb1..96c264b32b4 100644
--- a/ibuf/ibuf0ibuf.c
+++ b/ibuf/ibuf0ibuf.c
@@ -2912,6 +2912,14 @@ ibuf_get_volume_buffered_count_func(
ut_a(len == 1);
ut_ad(trx_sys_multiple_tablespace_format);
+ if (rec_get_deleted_flag(rec, 0)) {
+ /* This record has been merged already,
+ but apparently the system crashed before
+ the change was discarded from the buffer.
+ Pretend that the record does not exist. */
+ return(0);
+ }
+
types = rec_get_nth_field_old(rec, IBUF_REC_FIELD_METADATA, &len);
switch (UNIV_EXPECT(len % DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE,
@@ -4224,11 +4232,11 @@ ibuf_delete(
page, 1);
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
page_cur_delete_rec(&page_cur, index, offsets, mtr);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
if (page_zip) {
@@ -4333,6 +4341,22 @@ ibuf_delete_rec(
ut_ad(ibuf_rec_get_page_no(mtr, btr_pcur_get_rec(pcur)) == page_no);
ut_ad(ibuf_rec_get_space(mtr, btr_pcur_get_rec(pcur)) == space);
+#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
+ if (ibuf_debug == 2) {
+ /* Inject a fault (crash). We do this before trying
+ optimistic delete, because a pessimistic delete in the
+ change buffer would require a larger test case. */
+
+ /* Flag the buffered record as processed, to avoid
+ an assertion failure after crash recovery. */
+ btr_cur_set_deleted_flag_for_ibuf(
+ btr_pcur_get_rec(pcur), NULL, TRUE, mtr);
+ ibuf_mtr_commit(mtr);
+ log_make_checkpoint_at(IB_ULONGLONG_MAX, TRUE);
+ DBUG_SUICIDE();
+ }
+#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
+
success = btr_cur_optimistic_delete(btr_pcur_get_btr_cur(pcur), mtr);
if (success) {
@@ -4367,7 +4391,13 @@ ibuf_delete_rec(
ut_ad(ibuf_rec_get_page_no(mtr, btr_pcur_get_rec(pcur)) == page_no);
ut_ad(ibuf_rec_get_space(mtr, btr_pcur_get_rec(pcur)) == space);
- /* We have to resort to a pessimistic delete from ibuf */
+ /* We have to resort to a pessimistic delete from ibuf.
+ Delete-mark the record so that it will not be applied again,
+ in case the server crashes before the pessimistic delete is
+ made persistent. */
+ btr_cur_set_deleted_flag_for_ibuf(
+ btr_pcur_get_rec(pcur), NULL, TRUE, mtr);
+
btr_pcur_store_position(pcur, mtr);
ibuf_btr_pcur_commit_specify_mtr(pcur, mtr);
@@ -4448,7 +4478,7 @@ ibuf_merge_or_delete_for_page(
ut_ad(!block || buf_block_get_space(block) == space);
ut_ad(!block || buf_block_get_page_no(block) == page_no);
ut_ad(!block || buf_block_get_zip_size(block) == zip_size);
- ut_ad(!block || buf_block_get_io_fix(block) == BUF_IO_READ);
+ ut_ad(!block || buf_block_get_io_fix_unlocked(block) == BUF_IO_READ);
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE
|| trx_sys_hdr_page(space, page_no)) {
@@ -4648,7 +4678,7 @@ loop:
fputs("InnoDB: Discarding record\n ", stderr);
rec_print_old(stderr, rec);
fputs("\nInnoDB: from the insert buffer!\n\n", stderr);
- } else if (block) {
+ } else if (block && !rec_get_deleted_flag(rec, 0)) {
/* Now we have at pcur a record which should be
applied on the index page; NOTE that the call below
copies pointers to fields in rec, and we must
diff --git a/include/btr0cur.h b/include/btr0cur.h
index cb44129aeb5..1fe67e1dfe4 100644
--- a/include/btr0cur.h
+++ b/include/btr0cur.h
@@ -636,7 +636,7 @@ btr_cur_set_deleted_flag_for_ibuf(
when the tablespace is
uncompressed */
ibool val, /*!< in: value to set */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr); /*!< in/out: mini-transaction */
/*######################################################################*/
/** In the pessimistic delete, if the page data size drops below this
diff --git a/include/buf0buf.h b/include/buf0buf.h
index 5e3eeb77279..bbce9d31bd1 100644
--- a/include/buf0buf.h
+++ b/include/buf0buf.h
@@ -958,7 +958,7 @@ buf_block_set_file_page(
ulint space, /*!< in: tablespace id */
ulint page_no);/*!< in: page number */
/*********************************************************************//**
-Gets the io_fix state of a block.
+Gets the io_fix state of a block. Requires that the block mutex is held.
@return io_fix state */
UNIV_INLINE
enum buf_io_fix
@@ -967,7 +967,17 @@ buf_page_get_io_fix(
const buf_page_t* bpage) /*!< in: pointer to the control block */
__attribute__((pure));
/*********************************************************************//**
-Gets the io_fix state of a block.
+Gets the io_fix state of a block. Does not assert that the block mutex is
+held, to be used in the cases where it is safe not to hold it.
+@return io_fix state */
+UNIV_INLINE
+enum buf_io_fix
+buf_page_get_io_fix_unlocked(
+/*=========================*/
+ const buf_page_t* bpage) /*!< in: pointer to the control block */
+ __attribute__((pure));
+/*********************************************************************//**
+Gets the io_fix state of a block. Requires that the block mutex is held.
@return io_fix state */
UNIV_INLINE
enum buf_io_fix
@@ -976,6 +986,16 @@ buf_block_get_io_fix(
const buf_block_t* block) /*!< in: pointer to the control block */
__attribute__((pure));
/*********************************************************************//**
+Gets the io_fix state of a block. Does not assert that the block mutex is
+held, to be used in the cases where it is safe not to hold it.
+@return io_fix state */
+UNIV_INLINE
+enum buf_io_fix
+buf_block_get_io_fix_unlocked(
+/*==========================*/
+ const buf_block_t* block) /*!< in: pointer to the control block */
+ __attribute__((pure));
+/*********************************************************************//**
Sets the io_fix state of a block. */
UNIV_INLINE
void
diff --git a/include/buf0buf.ic b/include/buf0buf.ic
index 221f86d9d62..a8691154f97 100644
--- a/include/buf0buf.ic
+++ b/include/buf0buf.ic
@@ -434,7 +434,7 @@ buf_block_set_file_page(
}
/*********************************************************************//**
-Gets the io_fix state of a block.
+Gets the io_fix state of a block. Requires that the block mutex is held.
@return io_fix state */
UNIV_INLINE
enum buf_io_fix
@@ -442,6 +442,20 @@ buf_page_get_io_fix(
/*================*/
const buf_page_t* bpage) /*!< in: pointer to the control block */
{
+ ut_ad(mutex_own(buf_page_get_mutex(bpage)));
+ return buf_page_get_io_fix_unlocked(bpage);
+}
+
+/*********************************************************************//**
+Gets the io_fix state of a block. Does not assert that the block mutex is
+held, to be used in the cases where it is safe not to hold it.
+@return io_fix state */
+UNIV_INLINE
+enum buf_io_fix
+buf_page_get_io_fix_unlocked(
+/*=========================*/
+ const buf_page_t* bpage) /*!< in: pointer to the control block */
+{
enum buf_io_fix io_fix = (enum buf_io_fix) bpage->io_fix;
#ifdef UNIV_DEBUG
switch (io_fix) {
@@ -457,7 +471,7 @@ buf_page_get_io_fix(
}
/*********************************************************************//**
-Gets the io_fix state of a block.
+Gets the io_fix state of a block. Requires that the block mutex is held.
@return io_fix state */
UNIV_INLINE
enum buf_io_fix
@@ -469,6 +483,19 @@ buf_block_get_io_fix(
}
/*********************************************************************//**
+Gets the io_fix state of a block. Does not assert that the block mutex is
+held, to be used in the cases where it is safe not to hold it.
+@return io_fix state */
+UNIV_INLINE
+enum buf_io_fix
+buf_block_get_io_fix_unlocked(
+/*==========================*/
+ const buf_block_t* block) /*!< in: pointer to the control block */
+{
+ return(buf_page_get_io_fix_unlocked(&block->page));
+}
+
+/*********************************************************************//**
Sets the io_fix state of a block. */
UNIV_INLINE
void
diff --git a/include/dict0dict.h b/include/dict0dict.h
index 6c1c4117c05..1dd0b3f5082 100644
--- a/include/dict0dict.h
+++ b/include/dict0dict.h
@@ -910,6 +910,18 @@ dict_index_get_nth_col_pos(
const dict_index_t* index, /*!< in: index */
ulint n); /*!< in: column number */
/********************************************************************//**
+Looks for column n in an index.
+@return position in internal representation of the index;
+ULINT_UNDEFINED if not contained */
+UNIV_INTERN
+ulint
+dict_index_get_nth_col_or_prefix_pos(
+/*=================================*/
+ const dict_index_t* index, /*!< in: index */
+ ulint n, /*!< in: column number */
+ ibool inc_prefix); /*!< in: TRUE=consider
+ column prefixes too */
+/********************************************************************//**
Returns TRUE if the index contains a column or a prefix of that column.
@return TRUE if contains the column or its prefix */
UNIV_INTERN
diff --git a/include/dict0mem.h b/include/dict0mem.h
index 54593a0b9c7..630942ae2ac 100644
--- a/include/dict0mem.h
+++ b/include/dict0mem.h
@@ -377,10 +377,15 @@ struct dict_index_struct{
unsigned type:DICT_IT_BITS;
/*!< index type (DICT_CLUSTERED, DICT_UNIQUE,
DICT_UNIVERSAL, DICT_IBUF, DICT_CORRUPT) */
- unsigned trx_id_offset:10;/*!< position of the trx id column
+#define MAX_KEY_LENGTH_BITS 12
+ unsigned trx_id_offset:MAX_KEY_LENGTH_BITS;
+ /*!< position of the trx id column
in a clustered index record, if the fields
before it are known to be of a fixed size,
0 otherwise */
+#if (1<<MAX_KEY_LENGTH_BITS) < MAX_KEY_LENGTH
+# error (1<<MAX_KEY_LENGTH_BITS) < MAX_KEY_LENGTH
+#endif
unsigned n_user_defined_cols:10;
/*!< number of columns the user defined to
be in the index: in the internal
diff --git a/include/log0online.h b/include/log0online.h
index e7c3f301e45..999a317780e 100644
--- a/include/log0online.h
+++ b/include/log0online.h
@@ -41,23 +41,51 @@ typedef struct log_bitmap_iterator_struct log_bitmap_iterator_t;
Initializes the online log following subsytem. */
UNIV_INTERN
void
-log_online_read_init();
-/*===================*/
+log_online_read_init(void);
+/*=======================*/
/*********************************************************************//**
Shuts down the online log following subsystem. */
UNIV_INTERN
void
-log_online_read_shutdown();
-/*=======================*/
+log_online_read_shutdown(void);
+/*===========================*/
/*********************************************************************//**
Reads and parses the redo log up to last checkpoint LSN to build the changed
-page bitmap which is then written to disk. */
+page bitmap which is then written to disk.
+
+@return TRUE if log tracking succeeded, FALSE if bitmap write I/O error */
UNIV_INTERN
-void
-log_online_follow_redo_log();
-/*=========================*/
+ibool
+log_online_follow_redo_log(void);
+/*=============================*/
+
+/************************************************************//**
+Delete all the bitmap files for data less than the specified LSN.
+If called with lsn == 0 (i.e. set by RESET request) or
+IB_ULONGLONG_MAX, restart the bitmap file sequence, otherwise
+continue it.
+
+@return FALSE to indicate success, TRUE for failure. */
+UNIV_INTERN
+ibool
+log_online_purge_changed_page_bitmaps(
+/*==================================*/
+ ib_uint64_t lsn); /*!<in: LSN to purge files up to */
+
+/************************************************************//**
+Delete all the bitmap files for data less than the specified LSN.
+If called with lsn == 0 (i.e. set by RESET request) or
+IB_ULONGLONG_MAX, restart the bitmap file sequence, otherwise
+continue it.
+
+@return FALSE to indicate success, TRUE for failure. */
+UNIV_INTERN
+ibool
+log_online_purge_changed_page_bitmaps(
+/*==================================*/
+ ib_uint64_t lsn); /*!<in: LSN to purge files up to */
#define LOG_BITMAP_ITERATOR_START_LSN(i) \
((i).start_lsn)
diff --git a/include/os0file.h b/include/os0file.h
index dd141f280e1..a4e13777680 100644
--- a/include/os0file.h
+++ b/include/os0file.h
@@ -190,6 +190,8 @@ extern ulint os_n_file_reads;
extern ulint os_n_file_writes;
extern ulint os_n_fsyncs;
+#define OS_MIN_LOG_BLOCK_SIZE 512
+
extern ulint srv_log_block_size;
#ifdef UNIV_PFS_IO
diff --git a/include/page0zip.h b/include/page0zip.h
index fe3d2e52e0b..23a2cac618b 100644
--- a/include/page0zip.h
+++ b/include/page0zip.h
@@ -156,9 +156,10 @@ page_zip_validate_low(
/*==================*/
const page_zip_des_t* page_zip,/*!< in: compressed page */
const page_t* page, /*!< in: uncompressed page */
+ const dict_index_t* index, /*!< in: index of the page, if known */
ibool sloppy) /*!< in: FALSE=strict,
TRUE=ignore the MIN_REC_FLAG */
- __attribute__((nonnull));
+ __attribute__((nonnull(1,2)));
/**********************************************************************//**
Check that the compressed and decompressed pages match. */
UNIV_INTERN
@@ -166,8 +167,9 @@ ibool
page_zip_validate(
/*==============*/
const page_zip_des_t* page_zip,/*!< in: compressed page */
- const page_t* page) /*!< in: uncompressed page */
- __attribute__((nonnull));
+ const page_t* page, /*!< in: uncompressed page */
+ const dict_index_t* index) /*!< in: index of the page, if known */
+ __attribute__((nonnull(1,2)));
#endif /* UNIV_ZIP_DEBUG */
/**********************************************************************//**
diff --git a/include/row0undo.h b/include/row0undo.h
index 6eb4ca448b3..9420d022e3b 100644
--- a/include/row0undo.h
+++ b/include/row0undo.h
@@ -87,10 +87,6 @@ that index record. */
enum undo_exec {
UNDO_NODE_FETCH_NEXT = 1, /*!< we should fetch the next
undo log record */
- UNDO_NODE_PREV_VERS, /*!< the roll ptr to previous
- version of a row is stored in
- node, and undo should be done
- based on it */
UNDO_NODE_INSERT, /*!< undo a fresh insert of a
row to a table */
UNDO_NODE_MODIFY /*!< undo a modify operation
@@ -108,9 +104,6 @@ struct undo_node_struct{
undo_no_t undo_no;/*!< undo number of the record */
ulint rec_type;/*!< undo log record type: TRX_UNDO_INSERT_REC,
... */
- roll_ptr_t new_roll_ptr;
- /*!< roll ptr to restore to clustered index
- record */
trx_id_t new_trx_id; /*!< trx id to restore to clustered index
record */
btr_pcur_t pcur; /*!< persistent cursor used in searching the
diff --git a/include/row0upd.ic b/include/row0upd.ic
index 10646241125..6706c9f8c69 100644
--- a/include/row0upd.ic
+++ b/include/row0upd.ic
@@ -28,6 +28,7 @@ Created 12/27/1996 Heikki Tuuri
# include "trx0trx.h"
# include "trx0undo.h"
# include "row0row.h"
+# include "lock0lock.h"
#endif /* !UNIV_HOTBACKUP */
#include "page0zip.h"
@@ -171,6 +172,8 @@ row_upd_rec_sys_fields(
#if DATA_TRX_ID + 1 != DATA_ROLL_PTR
# error "DATA_TRX_ID + 1 != DATA_ROLL_PTR"
#endif
+ ut_ad(lock_check_trx_id_sanity(trx_read_trx_id(rec + offset),
+ rec, index, offsets, FALSE));
trx_write_trx_id(rec + offset, trx->id);
trx_write_roll_ptr(rec + offset + DATA_TRX_ID_LEN, roll_ptr);
}
diff --git a/include/srv0srv.h b/include/srv0srv.h
index 84422772fc1..dd3f8f6abab 100644
--- a/include/srv0srv.h
+++ b/include/srv0srv.h
@@ -145,7 +145,7 @@ extern my_bool srv_track_changed_pages;
extern ib_uint64_t srv_max_bitmap_file_size;
extern
-ulonglong srv_changed_pages_limit;
+ulonglong srv_max_changed_pages;
extern ibool srv_auto_extend_last_data_file;
extern ulint srv_last_file_size_max;
diff --git a/include/sync0sync.h b/include/sync0sync.h
index 33c95a94804..263b7b0494a 100644
--- a/include/sync0sync.h
+++ b/include/sync0sync.h
@@ -89,6 +89,7 @@ extern mysql_pfs_key_t hash_table_mutex_key;
extern mysql_pfs_key_t ibuf_bitmap_mutex_key;
extern mysql_pfs_key_t ibuf_mutex_key;
extern mysql_pfs_key_t ibuf_pessimistic_insert_mutex_key;
+extern mysql_pfs_key_t log_bmp_sys_mutex_key;
extern mysql_pfs_key_t log_sys_mutex_key;
extern mysql_pfs_key_t log_flush_order_mutex_key;
extern mysql_pfs_key_t kernel_mutex_key;
@@ -672,6 +673,7 @@ or row lock! */
#define SYNC_TRX_LOCK_HEAP 298
#define SYNC_TRX_SYS_HEADER 290
#define SYNC_PURGE_QUEUE 200
+#define SYNC_LOG_ONLINE 175
#define SYNC_LOG 170
#define SYNC_LOG_FLUSH_ORDER 156
#define SYNC_RECV 168
diff --git a/include/univ.i b/include/univ.i
index a1fff561682..e5f43bc56ea 100644
--- a/include/univ.i
+++ b/include/univ.i
@@ -301,6 +301,24 @@ management to ensure correct alignment for doubles etc. */
========================
*/
+/** There are currently two InnoDB file formats which are used to group
+features with similar restrictions and dependencies. Using an enum allows
+switch statements to give a compiler warning when a new one is introduced. */
+enum innodb_file_formats_enum {
+ /** Antelope File Format: InnoDB/MySQL up to 5.1.
+ This format includes REDUNDANT and COMPACT row formats */
+ UNIV_FORMAT_A = 0,
+
+ /** Barracuda File Format: Introduced in InnoDB plugin for 5.1:
+ This format includes COMPRESSED and DYNAMIC row formats. It
+ includes the ability to create secondary indexes from data that
+ is not on the clustered index page and the ability to store more
+ data off the clustered index page. */
+ UNIV_FORMAT_B = 1
+};
+
+typedef enum innodb_file_formats_enum innodb_file_formats_t;
+
/* The 2-logarithm of UNIV_PAGE_SIZE: */
/* #define UNIV_PAGE_SIZE_SHIFT 14 */
#define UNIV_PAGE_SIZE_SHIFT_MAX 14
diff --git a/lock/lock0lock.c b/lock/lock0lock.c
index f172ad6695b..3829bd1cf03 100644
--- a/lock/lock0lock.c
+++ b/lock/lock0lock.c
@@ -4658,12 +4658,16 @@ loop:
lock_mutex_exit_kernel();
- mtr_start(&mtr);
+ if (srv_show_verbose_locks) {
+ mtr_start(&mtr);
- buf_page_get_with_no_latch(space, zip_size,
- page_no, &mtr);
+ buf_page_get_gen(space, zip_size, page_no,
+ RW_NO_LATCH, NULL,
+ BUF_GET_POSSIBLY_FREED,
+ __FILE__, __LINE__, &mtr);
- mtr_commit(&mtr);
+ mtr_commit(&mtr);
+ }
load_page_first = FALSE;
diff --git a/log/log0online.c b/log/log0online.c
index 55eb9d17c46..be0a9708b8c 100644
--- a/log/log0online.c
+++ b/log/log0online.c
@@ -36,6 +36,11 @@ Online database log parsing for changed page tracking
enum { FOLLOW_SCAN_SIZE = 4 * (UNIV_PAGE_SIZE_MAX) };
+#ifdef UNIV_PFS_MUTEX
+/* Key to register log_bmp_sys->mutex with PFS */
+UNIV_INTERN mysql_pfs_key_t log_bmp_sys_mutex_key;
+#endif /* UNIV_PFS_MUTEX */
+
/** Log parsing and bitmap output data structure */
struct log_bitmap_struct {
byte read_buf[FOLLOW_SCAN_SIZE];
@@ -69,6 +74,7 @@ struct log_bitmap_struct {
both the correct type and the tree does
not mind its overwrite during
rbt_next() tree traversal. */
+ mutex_t mutex; /*!< mutex protecting all the fields.*/
};
/* The log parsing and bitmap output struct instance */
@@ -172,6 +178,8 @@ log_online_set_page_bit(
byte search_page[MODIFIED_PAGE_BLOCK_SIZE];
byte *page_ptr;
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
ut_a(space != ULINT_UNDEFINED);
ut_a(page_no != ULINT_UNDEFINED);
@@ -312,8 +320,8 @@ its name is correct and use it for (re-)tracking start.
@return the last fully tracked LSN */
static
ib_uint64_t
-log_online_read_last_tracked_lsn()
-/*==============================*/
+log_online_read_last_tracked_lsn(void)
+/*==================================*/
{
byte page[MODIFIED_PAGE_BLOCK_SIZE];
ibool is_last_page = FALSE;
@@ -405,8 +413,10 @@ log_online_can_track_missing(
if (last_tracked_lsn > tracking_start_lsn) {
fprintf(stderr,
- "InnoDB: Error: last tracked LSN is in future. This "
- "can be caused by mismatched bitmap files.\n");
+ "InnoDB: Error: last tracked LSN %llu is ahead of "
+ "tracking start LSN %llu. This can be caused by "
+ "mismatched bitmap files.\n", last_tracked_lsn,
+ tracking_start_lsn);
exit(1);
}
@@ -431,10 +441,10 @@ log_online_track_missing_on_startup(
{
ut_ad(last_tracked_lsn != tracking_start_lsn);
- fprintf(stderr, "InnoDB: last tracked LSN is %llu, but the last "
- "checkpoint LSN is %llu. This might be due to a server "
- "crash or a very fast shutdown. ", last_tracked_lsn,
- tracking_start_lsn);
+ fprintf(stderr, "InnoDB: last tracked LSN in \'%s\' is %llu, but the "
+ "last checkpoint LSN is %llu. This might be due to a server "
+ "crash or a very fast shutdown. ", log_bmp_sys->out.name,
+ last_tracked_lsn, tracking_start_lsn);
/* See if we can fully recover the missing interval */
if (log_online_can_track_missing(last_tracked_lsn,
@@ -446,7 +456,9 @@ log_online_track_missing_on_startup(
log_bmp_sys->start_lsn = ut_max_uint64(last_tracked_lsn,
MIN_TRACKED_LSN);
log_set_tracked_lsn(log_bmp_sys->start_lsn);
- log_online_follow_redo_log();
+ if (!log_online_follow_redo_log()) {
+ exit(1);
+ }
ut_ad(log_bmp_sys->end_lsn >= tracking_start_lsn);
fprintf(stderr,
@@ -484,18 +496,47 @@ log_online_make_bitmap_name(
}
/*********************************************************************//**
-Create a new empty bitmap output file. */
+Check if an old file that has the name of a new bitmap file we are about to
+create should be overwritten. */
static
-void
-log_online_start_bitmap_file()
-/*==========================*/
+ibool
+log_online_should_overwrite(
+/*========================*/
+ const char *path) /*!< in: path to file */
{
- ibool success;
+ ibool success;
+ os_file_stat_t file_info;
- log_bmp_sys->out.file
- = os_file_create(innodb_file_bmp_key, log_bmp_sys->out.name,
- OS_FILE_OVERWRITE, OS_FILE_NORMAL,
- OS_DATA_FILE, &success);
+ /* Currently, it's OK to overwrite 0-sized files only */
+ success = os_file_get_status(path, &file_info);
+ return success && file_info.size == 0LL;
+}
+
+/*********************************************************************//**
+Create a new empty bitmap output file.
+
+@return TRUE if operation succeeded, FALSE if I/O error */
+static
+ibool
+log_online_start_bitmap_file(void)
+/*==============================*/
+{
+ ibool success = TRUE;
+
+ /* Check for an old file that should be deleted first */
+ if (log_online_should_overwrite(log_bmp_sys->out.name)) {
+ success = os_file_delete(log_bmp_sys->out.name);
+ }
+
+ if (UNIV_LIKELY(success)) {
+ log_bmp_sys->out.file
+ = os_file_create_simple_no_error_handling(
+ innodb_file_bmp_key,
+ log_bmp_sys->out.name,
+ OS_FILE_CREATE,
+ OS_FILE_READ_WRITE,
+ &success);
+ }
if (UNIV_UNLIKELY(!success)) {
/* The following call prints an error message */
@@ -503,25 +544,32 @@ log_online_start_bitmap_file()
fprintf(stderr,
"InnoDB: Error: Cannot create \'%s\'\n",
log_bmp_sys->out.name);
- exit(1);
+ log_bmp_sys->out.file = -1;
+ return FALSE;
}
log_bmp_sys->out.offset = 0;
+ return TRUE;
}
/*********************************************************************//**
-Close the current bitmap output file and create the next one. */
+Close the current bitmap output file and create the next one.
+
+@return TRUE if operation succeeded, FALSE if I/O error */
static
-void
+ibool
log_online_rotate_bitmap_file(
/*===========================*/
ib_uint64_t next_file_start_lsn) /*!<in: the start LSN name
part */
{
- os_file_close(log_bmp_sys->out.file);
+ if (log_bmp_sys->out.file != -1) {
+ os_file_close(log_bmp_sys->out.file);
+ log_bmp_sys->out.file = -1;
+ }
log_bmp_sys->out_seq_num++;
log_online_make_bitmap_name(next_file_start_lsn);
- log_online_start_bitmap_file();
+ return log_online_start_bitmap_file();
}
/*********************************************************************//**
@@ -556,8 +604,8 @@ log_online_is_bitmap_file(
Initialize the online log following subsytem. */
UNIV_INTERN
void
-log_online_read_init()
-/*==================*/
+log_online_read_init(void)
+/*======================*/
{
ibool success;
ib_uint64_t tracking_start_lsn
@@ -566,13 +614,16 @@ log_online_read_init()
os_file_stat_t bitmap_dir_file_info;
ib_uint64_t last_file_start_lsn = MIN_TRACKED_LSN;
- /* Assert (could be compile-time assert) that bitmap data start and end
- in a bitmap block is 8-byte aligned */
- ut_a(MODIFIED_PAGE_BLOCK_BITMAP % 8 == 0);
- ut_a(MODIFIED_PAGE_BLOCK_BITMAP_LEN % 8 == 0);
+ /* Bitmap data start and end in a bitmap block must be 8-byte
+ aligned. */
+ compile_time_assert(MODIFIED_PAGE_BLOCK_BITMAP % 8 == 0);
+ compile_time_assert(MODIFIED_PAGE_BLOCK_BITMAP_LEN % 8 == 0);
log_bmp_sys = ut_malloc(sizeof(*log_bmp_sys));
+ mutex_create(log_bmp_sys_mutex_key, &log_bmp_sys->mutex,
+ SYNC_LOG_ONLINE);
+
/* Enumerate existing bitmap files to either open the last one to get
the last tracked LSN either to find that there are none and start
tracking from scratch. */
@@ -629,7 +680,9 @@ log_online_read_init()
if (!success) {
/* New file, tracking from scratch */
- log_online_start_bitmap_file();
+ if (!log_online_start_bitmap_file()) {
+ exit(1);
+ }
}
else {
@@ -637,6 +690,7 @@ log_online_read_init()
ulint size_low;
ulint size_high;
ib_uint64_t last_tracked_lsn;
+ ib_uint64_t file_start_lsn;
success = os_file_get_size(log_bmp_sys->out.file, &size_low,
&size_high);
@@ -667,10 +721,12 @@ log_online_read_init()
if we can retrack any missing data. */
if (log_online_can_track_missing(last_tracked_lsn,
tracking_start_lsn)) {
- log_online_rotate_bitmap_file(last_tracked_lsn);
+ file_start_lsn = last_tracked_lsn;
+ } else {
+ file_start_lsn = tracking_start_lsn;
}
- else {
- log_online_rotate_bitmap_file(tracking_start_lsn);
+ if (!log_online_rotate_bitmap_file(file_start_lsn)) {
+ exit(1);
}
if (last_tracked_lsn < tracking_start_lsn) {
@@ -701,12 +757,15 @@ log_online_read_init()
Shut down the online log following subsystem. */
UNIV_INTERN
void
-log_online_read_shutdown()
-/*======================*/
+log_online_read_shutdown(void)
+/*==========================*/
{
ib_rbt_node_t *free_list_node = log_bmp_sys->page_free_list;
- os_file_close(log_bmp_sys->out.file);
+ if (log_bmp_sys->out.file != -1) {
+ os_file_close(log_bmp_sys->out.file);
+ log_bmp_sys->out.file = -1;
+ }
rbt_free(log_bmp_sys->modified_pages);
@@ -716,6 +775,8 @@ log_online_read_shutdown()
free_list_node = next;
}
+ mutex_free(&log_bmp_sys->mutex);
+
ut_free(log_bmp_sys);
}
@@ -759,14 +820,16 @@ from the buffer. If an incomplete record is found, moves it to the end of the
buffer. */
static
void
-log_online_parse_redo_log()
-/*=======================*/
+log_online_parse_redo_log(void)
+/*===========================*/
{
byte *ptr = log_bmp_sys->parse_buf;
byte *end = log_bmp_sys->parse_buf_end;
ulint len = 0;
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
while (ptr != end
&& log_bmp_sys->next_parse_lsn < log_bmp_sys->end_lsn) {
@@ -857,6 +920,8 @@ log_online_add_to_parse_buf(
ulint actual_data_len = (end_offset >= start_offset)
? end_offset - start_offset : 0;
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
ut_memcpy(log_bmp_sys->parse_buf_end, log_block + start_offset,
actual_data_len);
@@ -881,6 +946,8 @@ log_online_parse_redo_log_block(
{
ulint block_data_len;
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
block_data_len = log_block_get_data_len(log_block);
ut_ad(block_data_len % OS_FILE_LOG_BLOCK_SIZE == 0
@@ -907,6 +974,8 @@ log_online_follow_log_seg(
byte* log_block_end = log_bmp_sys->read_buf
+ (block_end_lsn - block_start_lsn);
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
mutex_enter(&log_sys->mutex);
log_group_read_log_seg(LOG_RECOVER, log_bmp_sys->read_buf,
group, block_start_lsn, block_end_lsn);
@@ -969,6 +1038,8 @@ log_online_follow_log_group(
ib_uint64_t block_start_lsn = contiguous_lsn;
ib_uint64_t block_end_lsn;
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
log_bmp_sys->next_parse_lsn = log_bmp_sys->start_lsn;
log_bmp_sys->parse_buf_end = log_bmp_sys->parse_buf;
@@ -996,19 +1067,26 @@ log_online_follow_log_group(
/*********************************************************************//**
Write, flush one bitmap block to disk and advance the output position if
-successful. */
+successful.
+
+@return TRUE if page written OK, FALSE if I/O error */
static
-void
+ibool
log_online_write_bitmap_page(
/*=========================*/
const byte *block) /*!< in: block to write */
{
ibool success;
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
+
+ /* Simulate a write error */
+ DBUG_EXECUTE_IF("bitmap_page_write_error", return FALSE;);
+
success = os_file_write(log_bmp_sys->out.name, log_bmp_sys->out.file,
block,
(ulint)(log_bmp_sys->out.offset & 0xFFFFFFFF),
- (ulint)(log_bmp_sys->out.offset << 32),
+ (ulint)(log_bmp_sys->out.offset >> 32),
MODIFIED_PAGE_BLOCK_SIZE);
if (UNIV_UNLIKELY(!success)) {
@@ -1016,7 +1094,7 @@ log_online_write_bitmap_page(
os_file_get_last_error(TRUE);
fprintf(stderr, "InnoDB: Error: failed writing changed page "
"bitmap file \'%s\'\n", log_bmp_sys->out.name);
- return;
+ return FALSE;
}
success = os_file_flush(log_bmp_sys->out.file, FALSE);
@@ -1027,25 +1105,38 @@ log_online_write_bitmap_page(
fprintf(stderr, "InnoDB: Error: failed flushing "
"changed page bitmap file \'%s\'\n",
log_bmp_sys->out.name);
- return;
+ return FALSE;
}
+#ifdef UNIV_LINUX
+ posix_fadvise(log_bmp_sys->out.file, log_bmp_sys->out.offset,
+ MODIFIED_PAGE_BLOCK_SIZE, POSIX_FADV_DONTNEED);
+#endif
+
log_bmp_sys->out.offset += MODIFIED_PAGE_BLOCK_SIZE;
+ return TRUE;
}
/*********************************************************************//**
Append the current changed page bitmap to the bitmap file. Clears the
-bitmap tree and recycles its nodes to the free list. */
+bitmap tree and recycles its nodes to the free list.
+
+@return TRUE if bitmap written OK, FALSE if I/O error */
static
-void
-log_online_write_bitmap()
-/*=====================*/
+ibool
+log_online_write_bitmap(void)
+/*=========================*/
{
ib_rbt_node_t *bmp_tree_node;
const ib_rbt_node_t *last_bmp_tree_node;
+ ibool success = TRUE;
+
+ ut_ad(mutex_own(&log_bmp_sys->mutex));
if (log_bmp_sys->out.offset >= srv_max_bitmap_file_size) {
- log_online_rotate_bitmap_file(log_bmp_sys->start_lsn);
+ if (!log_online_rotate_bitmap_file(log_bmp_sys->start_lsn)) {
+ return FALSE;
+ }
}
bmp_tree_node = (ib_rbt_node_t *)
@@ -1056,18 +1147,25 @@ log_online_write_bitmap()
byte *page = rbt_value(byte, bmp_tree_node);
- if (bmp_tree_node == last_bmp_tree_node) {
- mach_write_to_4(page + MODIFIED_PAGE_IS_LAST_BLOCK, 1);
- }
+ /* In case of a bitmap page write error keep on looping over
+ the tree to reclaim its memory through the free list instead of
+ returning immediately. */
+ if (UNIV_LIKELY(success)) {
+ if (bmp_tree_node == last_bmp_tree_node) {
+ mach_write_to_4(page
+ + MODIFIED_PAGE_IS_LAST_BLOCK,
+ 1);
+ }
- mach_write_to_8(page + MODIFIED_PAGE_START_LSN,
- log_bmp_sys->start_lsn);
- mach_write_to_8(page + MODIFIED_PAGE_END_LSN,
- log_bmp_sys->end_lsn);
- mach_write_to_4(page + MODIFIED_PAGE_BLOCK_CHECKSUM,
- log_online_calc_checksum(page));
+ mach_write_to_8(page + MODIFIED_PAGE_START_LSN,
+ log_bmp_sys->start_lsn);
+ mach_write_to_8(page + MODIFIED_PAGE_END_LSN,
+ log_bmp_sys->end_lsn);
+ mach_write_to_4(page + MODIFIED_PAGE_BLOCK_CHECKSUM,
+ log_online_calc_checksum(page));
- log_online_write_bitmap_page(page);
+ success = log_online_write_bitmap_page(page);
+ }
bmp_tree_node->left = log_bmp_sys->page_free_list;
log_bmp_sys->page_free_list = bmp_tree_node;
@@ -1077,18 +1175,29 @@ log_online_write_bitmap()
}
rbt_reset(log_bmp_sys->modified_pages);
+ return success;
}
/*********************************************************************//**
Read and parse the redo log up to last checkpoint LSN to build the changed
-page bitmap which is then written to disk. */
+page bitmap which is then written to disk.
+
+@return TRUE if log tracking succeeded, FALSE if bitmap write I/O error */
UNIV_INTERN
-void
-log_online_follow_redo_log()
-/*========================*/
+ibool
+log_online_follow_redo_log(void)
+/*============================*/
{
ib_uint64_t contiguous_start_lsn;
log_group_t* group;
+ ibool result;
+
+ mutex_enter(&log_bmp_sys->mutex);
+
+ if (!srv_track_changed_pages) {
+ mutex_exit(&log_bmp_sys->mutex);
+ return FALSE;
+ }
/* Grab the LSN of the last checkpoint, we will parse up to it */
mutex_enter(&(log_sys->mutex));
@@ -1096,7 +1205,8 @@ log_online_follow_redo_log()
mutex_exit(&(log_sys->mutex));
if (log_bmp_sys->end_lsn == log_bmp_sys->start_lsn) {
- return;
+ mutex_exit(&log_bmp_sys->mutex);
+ return TRUE;
}
group = UT_LIST_GET_FIRST(log_sys->log_groups);
@@ -1114,9 +1224,12 @@ log_online_follow_redo_log()
tracked LSN, so that LSN tracking for this interval is tested. */
DBUG_EXECUTE_IF("crash_before_bitmap_write", DBUG_SUICIDE(););
- log_online_write_bitmap();
+ result = log_online_write_bitmap();
log_bmp_sys->start_lsn = log_bmp_sys->end_lsn;
log_set_tracked_lsn(log_bmp_sys->start_lsn);
+
+ mutex_exit(&log_bmp_sys->mutex);
+ return result;
}
/*********************************************************************//**
@@ -1514,3 +1627,81 @@ log_online_bitmap_iterator_next(
return TRUE;
}
+
+/************************************************************//**
+Delete all the bitmap files for data less than the specified LSN.
+If called with lsn == 0 (i.e. set by RESET request) or
+IB_ULONGLONG_MAX, restart the bitmap file sequence, otherwise
+continue it.
+
+@return FALSE to indicate success, TRUE for failure. */
+UNIV_INTERN
+ibool
+log_online_purge_changed_page_bitmaps(
+/*==================================*/
+ ib_uint64_t lsn) /*!< in: LSN to purge files up to */
+{
+ log_online_bitmap_file_range_t bitmap_files;
+ size_t i;
+ ibool result = FALSE;
+
+ if (lsn == 0) {
+ lsn = IB_ULONGLONG_MAX;
+ }
+
+ if (srv_track_changed_pages) {
+ /* User requests might happen with both enabled and disabled
+ tracking */
+ mutex_enter(&log_bmp_sys->mutex);
+ }
+
+ if (!log_online_setup_bitmap_file_range(&bitmap_files, 0, lsn)) {
+ if (srv_track_changed_pages) {
+ mutex_exit(&log_bmp_sys->mutex);
+ }
+ return TRUE;
+ }
+
+ if (srv_track_changed_pages && lsn >= log_bmp_sys->end_lsn) {
+ /* If we have to delete the current output file, close it
+ first. */
+ os_file_close(log_bmp_sys->out.file);
+ log_bmp_sys->out.file = -1;
+ }
+
+ for (i = 0; i < bitmap_files.count; i++) {
+ if (bitmap_files.files[i].seq_num == 0
+ || bitmap_files.files[i].start_lsn >= lsn) {
+ break;
+ }
+ if (!os_file_delete_if_exists(bitmap_files.files[i].name)) {
+ os_file_get_last_error(TRUE);
+ result = TRUE;
+ break;
+ }
+ }
+
+ if (srv_track_changed_pages) {
+ if (lsn > log_bmp_sys->end_lsn) {
+ ib_uint64_t new_file_lsn;
+ if (lsn == IB_ULONGLONG_MAX) {
+ /* RESET restarts the sequence */
+ log_bmp_sys->out_seq_num = 0;
+ new_file_lsn = 0;
+ } else {
+ new_file_lsn = log_bmp_sys->end_lsn;
+ }
+ if (!log_online_rotate_bitmap_file(new_file_lsn)) {
+ /* If file create failed, signal the log
+ tracking thread to quit next time it wakes
+ up. */
+ srv_track_changed_pages = FALSE;
+ }
+ }
+
+ mutex_exit(&log_bmp_sys->mutex);
+ }
+
+ free(bitmap_files.files);
+ return result;
+}
diff --git a/log/log0recv.c b/log/log0recv.c
index 7322706dba7..d07a9d45c06 100644
--- a/log/log0recv.c
+++ b/log/log0recv.c
@@ -1697,9 +1697,8 @@ recv_recover_page_func(
if (fil_page_get_type(page) == FIL_PAGE_INDEX) {
page_zip_des_t* page_zip = buf_block_get_page_zip(block);
- if (page_zip) {
- ut_a(page_zip_validate_low(page_zip, page, FALSE));
- }
+ ut_a(!page_zip
+ || page_zip_validate_low(page_zip, page, NULL, FALSE));
}
#endif /* UNIV_ZIP_DEBUG */
diff --git a/os/os0file.c b/os/os0file.c
index f3dbef91242..8510a5f8378 100644
--- a/os/os0file.c
+++ b/os/os0file.c
@@ -1402,6 +1402,13 @@ os_file_create_func(
DWORD create_flag;
DWORD attributes;
ibool retry;
+
+ DBUG_EXECUTE_IF(
+ "ib_create_table_fail_disk_full",
+ *success = FALSE;
+ SetLastError(ERROR_DISK_FULL);
+ return((os_file_t) -1);
+ );
try_again:
ut_a(name);
@@ -1517,6 +1524,12 @@ try_again:
ibool retry;
const char* mode_str = NULL;
+ DBUG_EXECUTE_IF(
+ "ib_create_table_fail_disk_full",
+ *success = FALSE;
+ errno = ENOSPC;
+ return((os_file_t) -1);
+ );
try_again:
ut_a(name);
@@ -4253,8 +4266,8 @@ os_aio_func(
ut_ad(file);
ut_ad(buf);
ut_ad(n > 0);
- ut_ad(n % OS_FILE_LOG_BLOCK_SIZE == 0);
- ut_ad(offset % OS_FILE_LOG_BLOCK_SIZE == 0);
+ ut_ad(n % OS_MIN_LOG_BLOCK_SIZE == 0);
+ ut_ad(offset % OS_MIN_LOG_BLOCK_SIZE == 0);
ut_ad(os_aio_validate_skip());
#ifdef WIN_ASYNC_IO
ut_ad((n & 0xFFFFFFFFUL) == n);
diff --git a/page/page0cur.c b/page/page0cur.c
index d49b121afab..a722f5b188d 100644
--- a/page/page0cur.c
+++ b/page/page0cur.c
@@ -310,7 +310,7 @@ page_cur_search_with_match(
#endif /* UNIV_DEBUG */
page = buf_block_get_frame(block);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
page_check_dir(page);
@@ -1248,7 +1248,7 @@ page_cur_insert_rec_zip(
ut_ad(!page_rec_is_supremum(*current_rec));
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
/* 1. Get the size of the physical record in the page */
@@ -1902,6 +1902,7 @@ page_cur_delete_rec(
/* Save to local variables some data associated with current_rec */
cur_slot_no = page_dir_find_owner_slot(current_rec);
+ ut_ad(cur_slot_no > 0);
cur_dir_slot = page_dir_get_nth_slot(page, cur_slot_no);
cur_n_owned = page_dir_slot_get_n_owned(cur_dir_slot);
@@ -1972,7 +1973,7 @@ page_cur_delete_rec(
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
}
diff --git a/page/page0page.c b/page/page0page.c
index e29fa2eb1e5..f2ce6c9fe16 100644
--- a/page/page0page.c
+++ b/page/page0page.c
@@ -626,7 +626,7 @@ page_copy_rec_list_end(
Furthermore, btr_compress() may set FIL_PAGE_PREV to
FIL_NULL on new_page while leaving it intact on
new_page_zip. So, we cannot validate new_page_zip. */
- ut_a(page_zip_validate_low(page_zip, page, TRUE));
+ ut_a(page_zip_validate_low(page_zip, page, index, TRUE));
}
#endif /* UNIV_ZIP_DEBUG */
ut_ad(buf_block_get_frame(block) == page);
@@ -796,8 +796,8 @@ zip_reorganize:
/* Before copying, "ret" was the predecessor
of the predefined supremum record. If it was
the predefined infimum record, then it would
- still be the infimum. Thus, the assertion
- ut_a(ret_pos > 0) would fail here. */
+ still be the infimum, and we would have
+ ret_pos == 0. */
if (UNIV_UNLIKELY
(!page_zip_reorganize(new_block, index, mtr))) {
@@ -946,7 +946,7 @@ page_delete_rec_list_end(
ut_ad(size == ULINT_UNDEFINED || size < UNIV_PAGE_SIZE);
ut_ad(!page_zip || page_rec_is_comp(rec));
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page));
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
if (page_rec_is_infimum(rec)) {
@@ -988,7 +988,7 @@ page_delete_rec_list_end(
ULINT_UNDEFINED, &heap);
rec = rec_get_next_ptr(rec, TRUE);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
page_cur_delete_rec(&cur, index, offsets, mtr);
} while (page_offset(rec) != PAGE_NEW_SUPREMUM);
@@ -1052,6 +1052,7 @@ page_delete_rec_list_end(
n_owned = rec_get_n_owned_new(rec2) - count;
slot_index = page_dir_find_owner_slot(rec2);
+ ut_ad(slot_index > 0);
slot = page_dir_get_nth_slot(page, slot_index);
} else {
rec_t* rec2 = rec;
@@ -1067,6 +1068,7 @@ page_delete_rec_list_end(
n_owned = rec_get_n_owned_old(rec2) - count;
slot_index = page_dir_find_owner_slot(rec2);
+ ut_ad(slot_index > 0);
slot = page_dir_get_nth_slot(page, slot_index);
}
@@ -1126,7 +1128,8 @@ page_delete_rec_list_start(
between btr_attach_half_pages() and insert_page = ...
when btr_page_get_split_rec_to_left() holds
(direction == FSP_DOWN). */
- ut_a(!page_zip || page_zip_validate_low(page_zip, page, TRUE));
+ ut_a(!page_zip
+ || page_zip_validate_low(page_zip, page, index, TRUE));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -1197,9 +1200,10 @@ page_move_rec_list_end(
= buf_block_get_page_zip(block);
ut_a(!new_page_zip == !page_zip);
ut_a(!new_page_zip
- || page_zip_validate(new_page_zip, new_page));
+ || page_zip_validate(new_page_zip, new_page, index));
ut_a(!page_zip
- || page_zip_validate(page_zip, page_align(split_rec)));
+ || page_zip_validate(page_zip, page_align(split_rec),
+ index));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -1471,6 +1475,10 @@ page_rec_get_nth_const(
ulint n_owned;
const rec_t* rec;
+ if (nth == 0) {
+ return(page_get_infimum_rec(page));
+ }
+
ut_ad(nth < UNIV_PAGE_SIZE / (REC_N_NEW_EXTRA_BYTES + 1));
for (i = 0;; i++) {
diff --git a/page/page0zip.c b/page/page0zip.c
index 4751f4816a9..5357479908f 100644
--- a/page/page0zip.c
+++ b/page/page0zip.c
@@ -1437,7 +1437,7 @@ err_exit:
page_zip_get_size(page_zip) - PAGE_DATA);
mem_heap_free(heap);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
if (mtr) {
@@ -3123,6 +3123,7 @@ page_zip_validate_low(
/*==================*/
const page_zip_des_t* page_zip,/*!< in: compressed page */
const page_t* page, /*!< in: uncompressed page */
+ const dict_index_t* index, /*!< in: index of the page, if known */
ibool sloppy) /*!< in: FALSE=strict,
TRUE=ignore the MIN_REC_FLAG */
{
@@ -3210,39 +3211,102 @@ page_zip_validate_low(
committed. Let us tolerate that difference when we
are performing a sloppy validation. */
- if (sloppy) {
- byte info_bits_diff;
- ulint offset
- = rec_get_next_offs(page + PAGE_NEW_INFIMUM,
- TRUE);
- ut_a(offset >= PAGE_NEW_SUPREMUM);
- offset -= 5 /* REC_NEW_INFO_BITS */;
-
- info_bits_diff = page[offset] ^ temp_page[offset];
-
- if (info_bits_diff == REC_INFO_MIN_REC_FLAG) {
- temp_page[offset] = page[offset];
-
- if (!memcmp(page + PAGE_HEADER,
- temp_page + PAGE_HEADER,
- UNIV_PAGE_SIZE - PAGE_HEADER
- - FIL_PAGE_DATA_END)) {
-
- /* Only the minimum record flag
- differed. Let us ignore it. */
- page_zip_fail(("page_zip_validate: "
- "min_rec_flag "
- "(ignored, "
- "%lu,%lu,0x%02lx)\n",
- page_get_space_id(page),
- page_get_page_no(page),
- (ulong) page[offset]));
- goto func_exit;
+ ulint* offsets;
+ mem_heap_t* heap;
+ const rec_t* rec;
+ const rec_t* trec;
+ byte info_bits_diff;
+ ulint offset
+ = rec_get_next_offs(page + PAGE_NEW_INFIMUM, TRUE);
+ ut_a(offset >= PAGE_NEW_SUPREMUM);
+ offset -= 5/*REC_NEW_INFO_BITS*/;
+
+ info_bits_diff = page[offset] ^ temp_page[offset];
+
+ if (info_bits_diff == REC_INFO_MIN_REC_FLAG) {
+ temp_page[offset] = page[offset];
+
+ if (!memcmp(page + PAGE_HEADER,
+ temp_page + PAGE_HEADER,
+ UNIV_PAGE_SIZE - PAGE_HEADER
+ - FIL_PAGE_DATA_END)) {
+
+ /* Only the minimum record flag
+ differed. Let us ignore it. */
+ page_zip_fail(("page_zip_validate: "
+ "min_rec_flag "
+ "(%s"
+ "%lu,%lu,0x%02lx)\n",
+ sloppy ? "ignored, " : "",
+ page_get_space_id(page),
+ page_get_page_no(page),
+ (ulong) page[offset]));
+ valid = sloppy;
+ goto func_exit;
+ }
+ }
+
+ /* Compare the pointers in the PAGE_FREE list. */
+ rec = page_header_get_ptr(page, PAGE_FREE);
+ trec = page_header_get_ptr(temp_page, PAGE_FREE);
+
+ while (rec || trec) {
+ if (page_offset(rec) != page_offset(trec)) {
+ page_zip_fail(("page_zip_validate: "
+ "PAGE_FREE list: %u!=%u\n",
+ (unsigned) page_offset(rec),
+ (unsigned) page_offset(trec)));
+ valid = FALSE;
+ goto func_exit;
+ }
+
+ rec = page_rec_get_next_low(rec, TRUE);
+ trec = page_rec_get_next_low(trec, TRUE);
+ }
+
+ /* Compare the records. */
+ heap = NULL;
+ offsets = NULL;
+ rec = page_rec_get_next_low(
+ page + PAGE_NEW_INFIMUM, TRUE);
+ trec = page_rec_get_next_low(
+ temp_page + PAGE_NEW_INFIMUM, TRUE);
+
+ do {
+ if (page_offset(rec) != page_offset(trec)) {
+ page_zip_fail(("page_zip_validate: "
+ "record list: 0x%02x!=0x%02x\n",
+ (unsigned) page_offset(rec),
+ (unsigned) page_offset(trec)));
+ valid = FALSE;
+ break;
+ }
+
+ if (index) {
+ /* Compare the data. */
+ offsets = rec_get_offsets(
+ rec, index, offsets,
+ ULINT_UNDEFINED, &heap);
+
+ if (memcmp(rec - rec_offs_extra_size(offsets),
+ trec - rec_offs_extra_size(offsets),
+ rec_offs_size(offsets))) {
+ page_zip_fail(
+ ("page_zip_validate: "
+ "record content: 0x%02x",
+ (unsigned) page_offset(rec)));
+ valid = FALSE;
+ break;
}
}
+
+ rec = page_rec_get_next_low(rec, TRUE);
+ trec = page_rec_get_next_low(trec, TRUE);
+ } while (rec || trec);
+
+ if (heap) {
+ mem_heap_free(heap);
}
- page_zip_fail(("page_zip_validate: content\n"));
- valid = FALSE;
}
func_exit:
@@ -3264,9 +3328,10 @@ ibool
page_zip_validate(
/*==============*/
const page_zip_des_t* page_zip,/*!< in: compressed page */
- const page_t* page) /*!< in: uncompressed page */
+ const page_t* page, /*!< in: uncompressed page */
+ const dict_index_t* index) /*!< in: index of the page, if known */
{
- return(page_zip_validate_low(page_zip, page,
+ return(page_zip_validate_low(page_zip, page, index,
recv_recovery_is_on()));
}
#endif /* UNIV_ZIP_DEBUG */
@@ -3597,7 +3662,7 @@ page_zip_write_rec(
page_zip->m_nonempty = TRUE;
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page_align(rec)));
+ ut_a(page_zip_validate(page_zip, page_align(rec), index));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -3644,7 +3709,7 @@ corrupt:
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, NULL));
#endif /* UNIV_ZIP_DEBUG */
memcpy(page + offset,
@@ -3653,7 +3718,7 @@ corrupt:
ptr + 4, BTR_EXTERN_FIELD_REF_SIZE);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, NULL));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -3720,7 +3785,7 @@ page_zip_write_blob_ptr(
memcpy(externs, field, BTR_EXTERN_FIELD_REF_SIZE);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
if (mtr) {
@@ -3791,7 +3856,7 @@ corrupt:
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, NULL));
#endif /* UNIV_ZIP_DEBUG */
field = page + offset;
@@ -3812,7 +3877,7 @@ corrupt:
memcpy(storage, ptr + 4, REC_NODE_PTR_SIZE);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, NULL));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -4039,7 +4104,7 @@ page_zip_clear_rec(
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -4063,7 +4128,7 @@ page_zip_rec_set_deleted(
*slot &= ~(PAGE_ZIP_DIR_SLOT_DEL >> 8);
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page_align(rec)));
+ ut_a(page_zip_validate(page_zip, page_align(rec), NULL));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -4364,14 +4429,14 @@ corrupt:
goto corrupt;
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, NULL));
#endif /* UNIV_ZIP_DEBUG */
memcpy(page + offset, ptr, len);
memcpy(page_zip->data + offset, ptr, len);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, NULL));
#endif /* UNIV_ZIP_DEBUG */
}
@@ -4449,7 +4514,7 @@ page_zip_reorganize(
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(page_is_comp(page));
ut_ad(!dict_index_is_ibuf(index));
- /* Note that page_zip_validate(page_zip, page) may fail here. */
+ /* Note that page_zip_validate(page_zip, page, index) may fail here. */
UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE);
UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
@@ -4536,7 +4601,7 @@ page_zip_copy_recs(
FIL_PAGE_PREV or PAGE_LEVEL, causing a temporary min_rec_flag
mismatch. A strict page_zip_validate() will be executed later
during the B-tree operations. */
- ut_a(page_zip_validate_low(src_zip, src, TRUE));
+ ut_a(page_zip_validate_low(src_zip, src, index, TRUE));
#endif /* UNIV_ZIP_DEBUG */
ut_a(page_zip_get_size(page_zip) == page_zip_get_size(src_zip));
if (UNIV_UNLIKELY(src_zip->n_blobs)) {
@@ -4597,7 +4662,7 @@ page_zip_copy_recs(
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page));
+ ut_a(page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
btr_blob_dbg_add(page, index, "page_zip_copy_recs");
diff --git a/row/row0mysql.c b/row/row0mysql.c
index b52dfa06ce1..615a6b48bcd 100644
--- a/row/row0mysql.c
+++ b/row/row0mysql.c
@@ -1878,7 +1878,8 @@ Creates a table for MySQL. If the name of the table ends in
one of "innodb_monitor", "innodb_lock_monitor", "innodb_tablespace_monitor",
"innodb_table_monitor", then this will also start the printing of monitor
output by the master thread. If the table name ends in "innodb_mem_validate",
-InnoDB will try to invoke mem_validate().
+InnoDB will try to invoke mem_validate(). On failure the transaction will
+be rolled back and the 'table' object will be freed.
@return error code or DB_SUCCESS */
UNIV_INTERN
int
@@ -2016,6 +2017,8 @@ err_exit:
row_drop_table_for_mysql(table->name, trx, FALSE);
trx_commit_for_mysql(trx);
+ } else {
+ dict_mem_table_free(table);
}
break;
diff --git a/row/row0sel.c b/row/row0sel.c
index a3ea482ad4a..6b62b3b5382 100644
--- a/row/row0sel.c
+++ b/row/row0sel.c
@@ -2487,6 +2487,9 @@ row_sel_convert_mysql_key_to_innobase(
dfield++;
}
+ DBUG_EXECUTE_IF("innodb_srch_key_buffer_full",
+ ut_a(buf == (original_buf + buf_len)););
+
ut_a(buf <= original_buf + buf_len);
/* We set the length of tuple to n_fields: we assume that the memory
@@ -3984,6 +3987,11 @@ wait_table_again:
}
rec_loop:
+ if (trx_is_interrupted(trx)) {
+ err = DB_INTERRUPTED;
+ goto normal_return;
+ }
+
/*-------------------------------------------------------------*/
/* PHASE 4: Look for matching records in a loop */
@@ -4922,11 +4930,15 @@ row_search_autoinc_read_column(
rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, col_no + 1, &heap);
- data = rec_get_nth_field(rec, offsets, col_no, &len);
+ if (rec_offs_nth_sql_null(offsets, col_no)) {
+ /* There is no non-NULL value in the auto-increment column. */
+ value = 0;
+ goto func_exit;
+ }
- ut_a(len != UNIV_SQL_NULL);
+ data = rec_get_nth_field(rec, offsets, col_no, &len);
switch (mtype) {
case DATA_INT:
@@ -4948,14 +4960,15 @@ row_search_autoinc_read_column(
ut_error;
}
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
-
if (!unsigned_type && (ib_int64_t) value < 0) {
value = 0;
}
+func_exit:
+ if (UNIV_LIKELY_NULL(heap)) {
+ mem_heap_free(heap);
+ }
+
return(value);
}
diff --git a/row/row0umod.c b/row/row0umod.c
index 9597c476125..9e5fb8686c6 100644
--- a/row/row0umod.c
+++ b/row/row0umod.c
@@ -69,36 +69,6 @@ If you make a change in this module make sure that no codepath is
introduced where a call to log_free_check() is bypassed. */
/***********************************************************//**
-Checks if also the previous version of the clustered index record was
-modified or inserted by the same transaction, and its undo number is such
-that it should be undone in the same rollback.
-@return TRUE if also previous modify or insert of this row should be undone */
-static
-ibool
-row_undo_mod_undo_also_prev_vers(
-/*=============================*/
- undo_node_t* node, /*!< in: row undo node */
- undo_no_t* undo_no)/*!< out: the undo number */
-{
- trx_undo_rec_t* undo_rec;
- trx_t* trx;
-
- trx = node->trx;
-
- if (node->new_trx_id != trx->id) {
-
- *undo_no = 0;
- return(FALSE);
- }
-
- undo_rec = trx_undo_get_undo_rec_low(node->new_roll_ptr, node->heap);
-
- *undo_no = trx_undo_rec_get_undo_no(undo_rec);
-
- return(trx->roll_limit <= *undo_no);
-}
-
-/***********************************************************//**
Undoes a modify in a clustered index record.
@return DB_SUCCESS, DB_FAIL, or error code: we may run out of file space */
static
@@ -226,19 +196,11 @@ row_undo_mod_clust(
btr_pcur_t* pcur;
mtr_t mtr;
ulint err;
- ibool success;
- ibool more_vers;
- undo_no_t new_undo_no;
ut_ad(node && thr);
log_free_check();
- /* Check if also the previous version of the clustered index record
- should be undone in this same rollback operation */
-
- more_vers = row_undo_mod_undo_also_prev_vers(node, &new_undo_no);
-
pcur = &(node->pcur);
mtr_start(&mtr);
@@ -286,20 +248,6 @@ row_undo_mod_clust(
trx_undo_rec_release(node->trx, node->undo_no);
- if (more_vers && err == DB_SUCCESS) {
-
- /* Reserve the undo log record to the prior version after
- committing &mtr: this is necessary to comply with the latching
- order, as &mtr may contain the fsp latch which is lower in
- the latch hierarchy than trx->undo_mutex. */
-
- success = trx_undo_rec_reserve(node->trx, new_undo_no);
-
- if (success) {
- node->state = UNDO_NODE_PREV_VERS;
- }
- }
-
return(err);
}
@@ -847,7 +795,6 @@ row_undo_mod_parse_undo_rec(
trx_undo_update_rec_get_update(ptr, clust_index, type, trx_id,
roll_ptr, info_bits, trx,
node->heap, &(node->update));
- node->new_roll_ptr = roll_ptr;
node->new_trx_id = trx_id;
node->cmpl_info = cmpl_info;
}
diff --git a/row/row0undo.c b/row/row0undo.c
index 09970b7fe21..a1c1d72f8c6 100644
--- a/row/row0undo.c
+++ b/row/row0undo.c
@@ -282,25 +282,6 @@ row_undo(
} else {
node->state = UNDO_NODE_MODIFY;
}
-
- } else if (node->state == UNDO_NODE_PREV_VERS) {
-
- /* Undo should be done to the same clustered index record
- again in this same rollback, restoring the previous version */
-
- roll_ptr = node->new_roll_ptr;
-
- node->undo_rec = trx_undo_get_undo_rec_low(roll_ptr,
- node->heap);
- node->roll_ptr = roll_ptr;
- node->undo_no = trx_undo_rec_get_undo_no(node->undo_rec);
-
- if (trx_undo_roll_ptr_is_insert(roll_ptr)) {
-
- node->state = UNDO_NODE_INSERT;
- } else {
- node->state = UNDO_NODE_MODIFY;
- }
}
/* Prevent DROP TABLE etc. while we are rolling back this row.
diff --git a/srv/srv0srv.c b/srv/srv0srv.c
index 0864dd234d0..4b175c9c000 100644
--- a/srv/srv0srv.c
+++ b/srv/srv0srv.c
@@ -181,7 +181,7 @@ UNIV_INTERN my_bool srv_track_changed_pages = TRUE;
UNIV_INTERN ib_uint64_t srv_max_bitmap_file_size = 100 * 1024 * 1024;
-UNIV_INTERN ulonglong srv_changed_pages_limit = 0;
+UNIV_INTERN ulonglong srv_max_changed_pages = 0;
/** When TRUE, fake change transcations take S rather than X row locks.
When FALSE, row locks are not taken at all. */
@@ -2190,6 +2190,8 @@ srv_printf_innodb_monitor(
(long) srv_conc_n_threads,
(ulong) srv_conc_n_waiting_threads);
+ mutex_enter(&kernel_mutex);
+
fprintf(file, "%lu read views open inside InnoDB\n",
UT_LIST_GET_LEN(trx_sys->view_list));
@@ -2203,6 +2205,8 @@ srv_printf_innodb_monitor(
}
}
+ mutex_exit(&kernel_mutex);
+
n_reserved = fil_space_get_n_reserved_extents(0);
if (n_reserved > 0) {
fprintf(file,
@@ -3077,11 +3081,19 @@ srv_redo_log_follow_thread(
os_event_reset(srv_checkpoint_completed_event);
if (srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE) {
- log_online_follow_redo_log();
+ if (!log_online_follow_redo_log()) {
+ /* TODO: sync with I_S log tracking status? */
+ fprintf(stderr,
+ "InnoDB: Error: log tracking bitmap "
+ "write failed, stopping log tracking "
+ "thread!\n");
+ break;
+ }
}
} while (srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE);
+ srv_track_changed_pages = FALSE;
log_online_read_shutdown();
os_event_set(srv_redo_log_thread_finished_event);
@@ -3479,8 +3491,7 @@ loop:
buf_pool = buf_pool_from_array(j);
- /* The scanning flush_list is optimistic here */
-
+ buf_flush_list_mutex_enter(buf_pool);
level = 0;
n_blocks = 0;
bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
@@ -3494,6 +3505,7 @@ loop:
bpage = UT_LIST_GET_NEXT(flush_list, bpage);
n_blocks++;
}
+ buf_flush_list_mutex_exit(buf_pool);
if (level) {
bpl += ((ib_uint64_t) n_blocks * n_blocks
@@ -3559,30 +3571,26 @@ retry_flush_batch:
/* prev_flush_info[j] should be the previous loop's */
for (j = 0; j < srv_buf_pool_instances; j++) {
- lint blocks_num, new_blocks_num, flushed_blocks_num;
- ibool found;
+ lint blocks_num, new_blocks_num = 0;
+ lint flushed_blocks_num;
buf_pool = buf_pool_from_array(j);
+ buf_flush_list_mutex_enter(buf_pool);
blocks_num = UT_LIST_GET_LEN(buf_pool->flush_list);
bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
- new_blocks_num = 0;
- found = FALSE;
while (bpage != NULL) {
if (prev_flush_info[j].space == bpage->space
&& prev_flush_info[j].offset == bpage->offset
&& prev_flush_info[j].oldest_modification
== bpage->oldest_modification) {
- found = TRUE;
break;
}
bpage = UT_LIST_GET_NEXT(flush_list, bpage);
new_blocks_num++;
}
- if (!found) {
- new_blocks_num = blocks_num;
- }
+ buf_flush_list_mutex_exit(buf_pool);
flushed_blocks_num = new_blocks_num + prev_flush_info[j].count
- blocks_num;
@@ -3590,6 +3598,7 @@ retry_flush_batch:
flushed_blocks_num = 0;
}
+ buf_flush_list_mutex_enter(buf_pool);
bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
prev_flush_info[j].count = UT_LIST_GET_LEN(buf_pool->flush_list);
@@ -3597,7 +3606,9 @@ retry_flush_batch:
prev_flush_info[j].space = bpage->space;
prev_flush_info[j].offset = bpage->offset;
prev_flush_info[j].oldest_modification = bpage->oldest_modification;
+ buf_flush_list_mutex_exit(buf_pool);
} else {
+ buf_flush_list_mutex_exit(buf_pool);
prev_flush_info[j].space = 0;
prev_flush_info[j].offset = 0;
prev_flush_info[j].oldest_modification = 0;
@@ -3623,6 +3634,7 @@ retry_flush_batch:
/* store previous first pages of the flush_list */
for (j = 0; j < srv_buf_pool_instances; j++) {
buf_pool = buf_pool_from_array(j);
+ buf_flush_list_mutex_enter(buf_pool);
bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
@@ -3631,7 +3643,9 @@ retry_flush_batch:
prev_flush_info[j].space = bpage->space;
prev_flush_info[j].offset = bpage->offset;
prev_flush_info[j].oldest_modification = bpage->oldest_modification;
+ buf_flush_list_mutex_exit(buf_pool);
} else {
+ buf_flush_list_mutex_exit(buf_pool);
prev_flush_info[j].space = 0;
prev_flush_info[j].offset = 0;
prev_flush_info[j].oldest_modification = 0;
diff --git a/srv/srv0start.c b/srv/srv0start.c
index 9d47be44582..550a63a2a0d 100644
--- a/srv/srv0start.c
+++ b/srv/srv0start.c
@@ -1155,6 +1155,11 @@ void
init_log_online(void)
/*=================*/
{
+ if (UNIV_UNLIKELY(srv_force_recovery > 0)) {
+ srv_track_changed_pages = FALSE;
+ return;
+ }
+
if (srv_track_changed_pages) {
log_online_read_init();