-rw-r--r--  extra/mariabackup/xtrabackup.cc       |   4
-rw-r--r--  storage/innobase/include/buf0buf.ic   |  12
-rw-r--r--  storage/innobase/include/log0recv.h   |  15
-rw-r--r--  storage/innobase/log/log0recv.cc      | 141
4 files changed, 112 insertions(+), 60 deletions(-)
diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index e497ba5a972..73318d121fd 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -2686,7 +2686,9 @@ static lsn_t xtrabackup_copy_log(lsn_t start_lsn, lsn_t end_lsn, bool last)
}
}
- if (more_data && recv_parse_log_recs(0, STORE_NO, false)) {
+ store_t store = STORE_NO;
+
+ if (more_data && recv_parse_log_recs(0, &store, 0, false)) {
msg("Error: copying the log failed");
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index 98026159b8a..b3fe52a0412 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -113,7 +113,17 @@ ulint
buf_pool_get_n_pages(void)
/*======================*/
{
- return(buf_pool_get_curr_size() / UNIV_PAGE_SIZE);
+ if (!buf_pool_ptr)
+ return buf_pool_get_curr_size() >> srv_page_size_shift;
+
+ ulint chunk_size= 0;
+ for (uint i= 0; i < srv_buf_pool_instances; i++)
+ {
+ buf_pool_t* buf_pool = buf_pool_from_array(i);
+ for (uint j= 0; j < buf_pool->n_chunks; j++)
+ chunk_size+= buf_pool->chunks[j]->size;
+ }
+ return chunk_size;
}
/********************************************************************//**
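For reference: the patched buf_pool_get_n_pages() no longer derives a page count from buf_pool_get_curr_size(); it walks every buffer pool instance and adds up the sizes of its chunks. Below is a minimal, self-contained sketch of that summation; the Chunk and Pool structs are hypothetical stand-ins for buf_chunk_t and buf_pool_t, not the real InnoDB types.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-ins for buf_chunk_t / buf_pool_t.
    struct Chunk { size_t size; };                 // pages in this chunk
    struct Pool  { std::vector<Chunk> chunks; };   // one buffer pool instance

    // Mirrors the patched buf_pool_get_n_pages(): the total page count is
    // the sum of all chunk sizes over all buffer pool instances.
    static size_t pool_get_n_pages(const std::vector<Pool>& instances)
    {
        size_t n_pages = 0;
        for (const Pool& p : instances)
            for (const Chunk& c : p.chunks)
                n_pages += c.size;
        return n_pages;
    }

    int main()
    {
        std::vector<Pool> instances = {{{{8192}, {8192}}}, {{{16384}}}};
        std::cout << pool_get_n_pages(instances) << " pages\n";   // 32768
    }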
diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h
index 74aa4fcb517..d6da4cad9a9 100644
--- a/storage/innobase/include/log0recv.h
+++ b/storage/innobase/include/log0recv.h
@@ -133,14 +133,19 @@ bool recv_sys_add_to_parsing_buf(const byte* log_block, lsn_t scanned_lsn);
/** Parse log records from a buffer and optionally store them to a
hash table to wait merging to file pages.
-@param[in] checkpoint_lsn the LSN of the latest checkpoint
-@param[in] store whether to store page operations
-@param[in] apply whether to apply the records
+@param[in] checkpoint_lsn the LSN of the latest checkpoint
+@param[in,out] store whether to store page operations
+@param[in] available_memory memory available for storing the parsed redo log records
+@param[in] apply whether to apply the records
@return whether MLOG_CHECKPOINT record was seen the first time,
or corruption was noticed */
-bool recv_parse_log_recs(lsn_t checkpoint_lsn, store_t store, bool apply);
+bool recv_parse_log_recs(
+ lsn_t checkpoint_lsn,
+ store_t* store,
+ ulint available_memory,
+ bool apply);
-/** Moves the parsing buffer data left to the buffer start. */
+/** Moves the parsing buffer data left to the buffer start */
void recv_sys_justify_left_parsing_buf();
/** Report optimized DDL operation (without redo log),
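The signature change turns store into an in/out parameter: the parser itself can downgrade the mode to STORE_NO once the records buffered in recv_sys->heap reach available_memory, and the caller can observe that it did. A standalone sketch of that contract follows; the enum values match log0recv.h, but heap_check() and the sizes are simplified models, not the real InnoDB API.

    #include <cstdio>

    enum store_t { STORE_NO, STORE_YES, STORE_IF_EXISTS };   // as in log0recv.h

    // Simplified model of recv_sys_heap_check(): once the record heap reaches
    // the memory budget, stop storing further records and report it.
    static bool heap_check(store_t* store, unsigned long heap_size,
                           unsigned long available_mem)
    {
        if (*store != STORE_NO && heap_size >= available_mem) {
            *store = STORE_NO;      // keep parsing, but stop buffering records
            return true;
        }
        return false;
    }

    int main()
    {
        store_t store = STORE_YES;
        bool exceeded = heap_check(&store, 2UL << 20, 1UL << 20);
        std::printf("exceeded=%d store=%d\n", exceeded, store);  // exceeded=1 store=0
    }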
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index 9a20c7f7d22..347953b5289 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -761,7 +761,6 @@ recv_sys_var_init(void)
recv_previous_parsed_rec_type = MLOG_SINGLE_REC_FLAG;
recv_previous_parsed_rec_offset = 0;
recv_previous_parsed_rec_is_multi = 0;
- recv_n_pool_free_frames = 256;
recv_max_page_lsn = 0;
}
@@ -841,17 +840,13 @@ recv_sys_init()
recv_sys->flush_end = os_event_create(0);
}
- ulint size = buf_pool_get_curr_size();
- /* Set appropriate value of recv_n_pool_free_frames. */
- if (size >= 10 << 20) {
- /* Buffer pool of size greater than 10 MB. */
- recv_n_pool_free_frames = 512;
- }
+ recv_n_pool_free_frames =
+ buf_pool_get_n_pages() / 3;
recv_sys->buf = static_cast<byte*>(
ut_malloc_nokey(RECV_PARSING_BUF_SIZE));
- recv_sys->addr_hash = hash_create(size / 512);
+ recv_sys->addr_hash = hash_create(buf_pool_get_curr_size() / 512);
recv_sys->progress_time = time(NULL);
recv_max_page_lsn = 0;
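With this change the number of buffer frames kept free for recovery I/O scales with the buffer pool (one third of all pages) instead of being fixed at 256 or 512. A purely illustrative arithmetic sketch, assuming the default 16 KiB page size and a 128 MiB pool (both assumptions, not values from the patch):

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const size_t page_size   = 16384;        // assumed default page size
        const size_t pool_bytes  = 128UL << 20;  // assumed 128 MiB buffer pool
        const size_t n_pages     = pool_bytes / page_size;        // 8192 pages
        const size_t free_frames = n_pages / 3;  // as in patched recv_sys_init()

        std::printf("%zu pages -> %zu frames kept free during recovery\n",
                    n_pages, free_frames);       // 8192 -> 2730
    }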
@@ -2758,14 +2753,40 @@ recv_mlog_index_load(ulint space_id, ulint page_no, lsn_t lsn)
}
}
+/** Check whether the parsed redo log records exceed the memory available
+in the buffer pool. If so, stop storing further records and, unless this
+is the last recovery phase, remember last_stored_lsn.
+@param[in,out] store whether to store page operations
+@param[in] available_mem memory available for reading redo logs */
+static bool recv_sys_heap_check(store_t* store, ulint available_mem)
+{
+ if (*store != STORE_NO
+ && mem_heap_get_size(recv_sys->heap) >= available_mem)
+ {
+ if (*store == STORE_YES)
+ recv_sys->last_stored_lsn= recv_sys->recovered_lsn;
+
+ *store= STORE_NO;
+ DBUG_PRINT("ib_log",("Ran out of memory and last "
+ "stored lsn " LSN_PF " last stored offset "
+ ULINTPF "\n",recv_sys->recovered_lsn,
+ recv_sys->recovered_offset));
+ return true;
+ }
+
+ return false;
+}
+
/** Parse log records from a buffer and optionally store them to a
hash table to wait merging to file pages.
-@param[in] checkpoint_lsn the LSN of the latest checkpoint
-@param[in] store whether to store page operations
-@param[in] apply whether to apply the records
+@param[in] checkpoint_lsn the LSN of the latest checkpoint
+@param[in,out] store whether to store page operations
+@param[in] available_mem memory available for storing the parsed redo log records
+@param[in] apply whether to apply the records
@return whether MLOG_CHECKPOINT record was seen the first time,
or corruption was noticed */
-bool recv_parse_log_recs(lsn_t checkpoint_lsn, store_t store, bool apply)
+bool recv_parse_log_recs(lsn_t checkpoint_lsn, store_t* store,
+ ulint available_mem, bool apply)
{
byte* ptr;
byte* end_ptr;
@@ -2777,6 +2798,7 @@ bool recv_parse_log_recs(lsn_t checkpoint_lsn, store_t store, bool apply)
ulint space;
ulint page_no;
byte* body;
+ const bool last_phase = (*store == STORE_IF_EXISTS);
ut_ad(log_mutex_own());
ut_ad(mutex_own(&recv_sys->mutex));
@@ -2791,6 +2813,12 @@ loop:
return(false);
}
+ /* Check for memory overflow; during the last recovery phase, stop
+ parsing the remaining redo log records once InnoDB ran out of memory */
+ if (recv_sys_heap_check(store, available_mem) && last_phase) {
+ return false;
+ }
+
switch (*ptr) {
case MLOG_CHECKPOINT:
#ifdef UNIV_LOG_LSN_DEBUG
@@ -2893,7 +2921,7 @@ loop:
break;
#endif /* UNIV_LOG_LSN_DEBUG */
default:
- switch (store) {
+ switch (*store) {
case STORE_NO:
break;
case STORE_IF_EXISTS:
@@ -3077,7 +3105,7 @@ corrupted_log:
recv_parse_or_apply_log_rec_body(). */
break;
default:
- switch (store) {
+ switch (*store) {
case STORE_NO:
break;
case STORE_IF_EXISTS:
@@ -3120,7 +3148,6 @@ bool recv_sys_add_to_parsing_buf(const byte* log_block, lsn_t scanned_lsn)
if (!recv_sys->parse_start_lsn) {
/* Cannot start parsing yet because no start point for
it found */
-
return(false);
}
@@ -3141,7 +3168,6 @@ bool recv_sys_add_to_parsing_buf(const byte* log_block, lsn_t scanned_lsn)
}
if (more_len == 0) {
-
return(false);
}
@@ -3176,8 +3202,9 @@ bool recv_sys_add_to_parsing_buf(const byte* log_block, lsn_t scanned_lsn)
/** Moves the parsing buffer data left to the buffer start. */
void recv_sys_justify_left_parsing_buf()
{
- ut_memmove(recv_sys->buf, recv_sys->buf + recv_sys->recovered_offset,
- recv_sys->len - recv_sys->recovered_offset);
+ memmove(recv_sys->buf,
+ recv_sys->buf + recv_sys->recovered_offset,
+ recv_sys->len - recv_sys->recovered_offset);
recv_sys->len -= recv_sys->recovered_offset;
@@ -3187,26 +3214,30 @@ void recv_sys_justify_left_parsing_buf()
/** Scan redo log from a buffer and stores new log data to the parsing buffer.
Parse and hash the log records if new data found.
Apply log records automatically when the hash table becomes full.
+@param[in] available_mem we let the hash table of recs to
+ grow to this size, at the maximum
+@param[in,out] store_to_hash whether the records should be
+ stored to the hash table; this is
+ reset if just debug checking is
+ needed, or when the available_mem
+ runs out
+@param[in] log_block log segment
+@param[in] checkpoint_lsn latest checkpoint LSN
+@param[in] start_lsn buffer start LSN
+@param[in] end_lsn buffer end LSN
+@param[in,out] contiguous_lsn it is known that all groups contain
+ contiguous log data up to this lsn
+@param[out] group_scanned_lsn scanning succeeded up to this lsn
@return true if not able to scan any more in this log group */
-static
-bool
-recv_scan_log_recs(
-/*===============*/
- ulint available_memory,/*!< in: we let the hash table of recs
- to grow to this size, at the maximum */
- store_t* store_to_hash, /*!< in,out: whether the records should be
- stored to the hash table; this is reset
- if just debug checking is needed, or
- when the available_memory runs out */
- const byte* log_block, /*!< in: log segment */
- lsn_t checkpoint_lsn, /*!< in: latest checkpoint LSN */
- lsn_t start_lsn, /*!< in: buffer start LSN */
- lsn_t end_lsn, /*!< in: buffer end LSN */
- lsn_t* contiguous_lsn, /*!< in/out: it is known that all log
- groups contain contiguous log data up
- to this lsn */
- lsn_t* group_scanned_lsn)/*!< out: scanning succeeded up to
- this lsn */
+static bool recv_scan_log_recs(
+ ulint available_mem,
+ store_t* store_to_hash,
+ const byte* log_block,
+ lsn_t checkpoint_lsn,
+ lsn_t start_lsn,
+ lsn_t end_lsn,
+ lsn_t* contiguous_lsn,
+ lsn_t* group_scanned_lsn)
{
lsn_t scanned_lsn = start_lsn;
bool finished = false;
@@ -3214,14 +3245,13 @@ recv_scan_log_recs(
bool more_data = false;
bool apply = recv_sys->mlog_checkpoint_lsn != 0;
ulint recv_parsing_buf_size = RECV_PARSING_BUF_SIZE;
-
+ const bool last_phase = (*store_to_hash == STORE_IF_EXISTS);
ut_ad(start_lsn % OS_FILE_LOG_BLOCK_SIZE == 0);
ut_ad(end_lsn % OS_FILE_LOG_BLOCK_SIZE == 0);
ut_ad(end_lsn >= start_lsn + OS_FILE_LOG_BLOCK_SIZE);
const byte* const log_end = log_block
+ ulint(end_lsn - start_lsn);
-
do {
ut_ad(!finished);
@@ -3332,6 +3362,13 @@ recv_scan_log_recs(
= log_block_get_checkpoint_no(log_block);
}
+ /* During the last phase of scanning, there can be redo log records
+ left in recv_sys->buf that must still be parsed into recv_sys->heap */
+ if (last_phase
+ && recv_sys->recovered_lsn < recv_sys->scanned_lsn) {
+ more_data = true;
+ }
+
if (data_len < OS_FILE_LOG_BLOCK_SIZE) {
/* Log data for this group ends here */
finished = true;
@@ -3349,7 +3386,8 @@ recv_scan_log_recs(
/* Try to parse more log records */
if (recv_parse_log_recs(checkpoint_lsn,
- *store_to_hash, apply)) {
+ store_to_hash, available_mem,
+ apply)) {
ut_ad(recv_sys->found_corrupt_log
|| recv_sys->found_corrupt_fs
|| recv_sys->mlog_checkpoint_lsn
@@ -3358,22 +3396,18 @@ recv_scan_log_recs(
goto func_exit;
}
- if (*store_to_hash != STORE_NO
- && mem_heap_get_size(recv_sys->heap) > available_memory) {
-
- DBUG_PRINT("ib_log", ("Ran out of memory and last "
- "stored lsn " LSN_PF,
- recv_sys->recovered_lsn));
-
- recv_sys->last_stored_lsn = recv_sys->recovered_lsn;
- *store_to_hash = STORE_NO;
- }
+ recv_sys_heap_check(store_to_hash, available_mem);
if (recv_sys->recovered_offset > recv_parsing_buf_size / 4) {
/* Move parsing buffer data to the buffer start */
-
recv_sys_justify_left_parsing_buf();
}
+
+ /* Need to re-parse the redo log records that are still
+ stored in recv_sys->buf */
+ if (last_phase && *store_to_hash == STORE_NO) {
+ finished = false;
+ }
}
func_exit:
@@ -3437,6 +3471,8 @@ recv_group_scan_log_recs(
redo log records before we have
finished the redo log scan. */
recv_apply_hashed_log_recs(false);
+ /* Rescan the redo logs from the last stored lsn */
+ end_lsn = recv_sys->recovered_lsn;
}
start_lsn = ut_uint64_align_down(end_lsn,
@@ -3448,8 +3484,7 @@ recv_group_scan_log_recs(
} while (end_lsn != start_lsn
&& !recv_scan_log_recs(
available_mem, &store_to_hash, log_sys->buf,
- checkpoint_lsn,
- start_lsn, end_lsn,
+ checkpoint_lsn, start_lsn, end_lsn,
contiguous_lsn, &group->scanned_lsn));
if (recv_sys->found_corrupt_log || recv_sys->found_corrupt_fs) {
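Taken together, the log0recv.cc changes let recovery make several passes over the redo log when the hash table does not fit in memory: the scanner downgrades store to STORE_NO, the already-buffered records are applied, and scanning resumes from the last recovered LSN. A standalone sketch of that control flow is below; scan_chunk(), the memory budget, and the LSN values are hypothetical stand-ins, not InnoDB functions.

    #include <cstdio>

    enum store_t { STORE_NO, STORE_YES, STORE_IF_EXISTS };

    // Hypothetical model: "parse" records from lsn onward, buffering at most
    // `budget` of them per pass, and report where parsing stopped.
    static unsigned long scan_chunk(unsigned long lsn, unsigned long end_lsn,
                                    unsigned long budget, store_t* store)
    {
        unsigned long buffered = 0;
        while (lsn < end_lsn) {
            if (*store != STORE_NO && buffered >= budget) {
                *store = STORE_NO;           // out of memory: stop buffering
                break;
            }
            ++buffered;                      // pretend one record was stored
            ++lsn;
        }
        return lsn;                          // analogous to recovered_lsn
    }

    int main()
    {
        const unsigned long end_lsn = 10;    // hypothetical end of the redo log
        unsigned long lsn = 0;
        // Last phase: keep applying and rescanning until the whole log is done.
        while (lsn < end_lsn) {
            store_t store = STORE_IF_EXISTS;
            lsn = scan_chunk(lsn, end_lsn, 4 /* memory budget */, &store);
            std::printf("applied records up to lsn %lu\n", lsn);
            // analogous to recv_apply_hashed_log_recs() followed by a rescan
            // from recv_sys->recovered_lsn when store was downgraded
        }
    }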