author     Marko Mäkelä <marko.makela@mariadb.com>	2019-10-10 21:30:40 +0300
committer  Marko Mäkelä <marko.makela@mariadb.com>	2019-10-10 21:30:40 +0300
commit     09afd3da1a80e403b5375845770dde61832afa50 (patch)
tree       a157942b08fc3d6bf97e2af3ae6e1d2e2ecbfc3e /storage
parent     0f7732d1d1d898f1a9051858932c18bcc9d6f2b4 (diff)
parent     4cdb72f2372b27e1fbbc573812240c1e29128c8f (diff)
Merge 10.3 into 10.4
Diffstat (limited to 'storage')
-rw-r--r--  storage/innobase/btr/btr0btr.cc        | 206
-rw-r--r--  storage/innobase/btr/btr0cur.cc        |   2
-rw-r--r--  storage/innobase/buf/buf0dblwr.cc      |   2
-rw-r--r--  storage/innobase/handler/ha_innodb.cc  |   2
-rw-r--r--  storage/innobase/include/btr0btr.h     |  40
-rw-r--r--  storage/innobase/include/log0log.h     |   8
-rw-r--r--  storage/innobase/include/row0ins.h     |   5
-rw-r--r--  storage/innobase/include/srv0srv.h     |   4
-rw-r--r--  storage/innobase/log/log0log.cc        |  12
-rw-r--r--  storage/innobase/page/page0page.cc     |  76
-rw-r--r--  storage/innobase/rem/rem0rec.cc        |   3
-rw-r--r--  storage/innobase/row/row0import.cc     |   2
-rw-r--r--  storage/innobase/row/row0ins.cc        |  17
-rw-r--r--  storage/innobase/row/row0upd.cc        |  11
-rw-r--r--  storage/innobase/srv/srv0srv.cc        |   4
-rw-r--r--  storage/innobase/srv/srv0start.cc      |   2
-rw-r--r--  storage/mroonga/ha_mroonga.hpp         |   7
17 files changed, 204 insertions(+), 199 deletions(-)
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 0c563954703..ac794cc8d10 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -2088,104 +2088,92 @@ btr_root_raise_and_insert(
}
}
-/*************************************************************//**
-Decides if the page should be split at the convergence point of inserts
+/** Decide if the page should be split at the convergence point of inserts
converging to the left.
-@return TRUE if split recommended */
-ibool
-btr_page_get_split_rec_to_left(
-/*===========================*/
- btr_cur_t* cursor, /*!< in: cursor at which to insert */
- rec_t** split_rec) /*!< out: if split recommended,
- the first record on upper half page,
- or NULL if tuple to be inserted should
- be first */
+@param[in] cursor insert position
+@return the first record to be moved to the right half page
+@retval NULL if no split is recommended */
+rec_t* btr_page_get_split_rec_to_left(const btr_cur_t* cursor)
{
- page_t* page;
- rec_t* insert_point;
- rec_t* infimum;
-
- page = btr_cur_get_page(cursor);
- insert_point = btr_cur_get_rec(cursor);
+ rec_t* split_rec = btr_cur_get_rec(cursor);
+ const page_t* page = page_align(split_rec);
if (page_header_get_ptr(page, PAGE_LAST_INSERT)
- == page_rec_get_next(insert_point)) {
-
- infimum = page_get_infimum_rec(page);
+ != page_rec_get_next(split_rec)) {
+ return NULL;
+ }
- /* If the convergence is in the middle of a page, include also
- the record immediately before the new insert to the upper
- page. Otherwise, we could repeatedly move from page to page
- lots of records smaller than the convergence point. */
+ /* The metadata record must be present in the leftmost leaf page
+ of the clustered index, if and only if index->is_instant().
+ However, during innobase_instant_try(), index->is_instant()
+ would already hold when row_ins_clust_index_entry_low()
+ is being invoked to insert the metadata record.
+ So, we can only assert that when the metadata record exists,
+ index->is_instant() must hold. */
+ ut_ad(!page_is_leaf(page) || page_has_prev(page)
+ || cursor->index->is_instant()
+ || !(rec_get_info_bits(page_rec_get_next_const(
+ page_get_infimum_rec(page)),
+ cursor->index->table->not_redundant())
+ & REC_INFO_MIN_REC_FLAG));
- if (infimum != insert_point
- && page_rec_get_next(infimum) != insert_point) {
+ const rec_t* infimum = page_get_infimum_rec(page);
- *split_rec = insert_point;
- } else {
- *split_rec = page_rec_get_next(insert_point);
- }
+ /* If the convergence is in the middle of a page, include also
+ the record immediately before the new insert to the upper
+ page. Otherwise, we could repeatedly move from page to page
+ lots of records smaller than the convergence point. */
- return(TRUE);
+ if (split_rec == infimum
+ || split_rec == page_rec_get_next_const(infimum)) {
+ split_rec = page_rec_get_next(split_rec);
}
- return(FALSE);
+ return split_rec;
}
-/*************************************************************//**
-Decides if the page should be split at the convergence point of inserts
+/** Decide if the page should be split at the convergence point of inserts
converging to the right.
-@return TRUE if split recommended */
-ibool
-btr_page_get_split_rec_to_right(
-/*============================*/
- btr_cur_t* cursor, /*!< in: cursor at which to insert */
- rec_t** split_rec) /*!< out: if split recommended,
- the first record on upper half page,
- or NULL if tuple to be inserted should
- be first */
+@param[in] cursor insert position
+@param[out] split_rec if split recommended, the first record
+ on the right half page, or
+ NULL if the to-be-inserted record
+ should be first
+@return whether split is recommended */
+bool
+btr_page_get_split_rec_to_right(const btr_cur_t* cursor, rec_t** split_rec)
{
- page_t* page;
- rec_t* insert_point;
-
- page = btr_cur_get_page(cursor);
- insert_point = btr_cur_get_rec(cursor);
+ rec_t* insert_point = btr_cur_get_rec(cursor);
+ const page_t* page = page_align(insert_point);
/* We use eager heuristics: if the new insert would be right after
the previous insert on the same page, we assume that there is a
pattern of sequential inserts here. */
- if (page_header_get_ptr(page, PAGE_LAST_INSERT) == insert_point) {
-
- rec_t* next_rec;
-
- next_rec = page_rec_get_next(insert_point);
-
- if (page_rec_is_supremum(next_rec)) {
-split_at_new:
- /* Split at the new record to insert */
- *split_rec = NULL;
- } else {
- rec_t* next_next_rec = page_rec_get_next(next_rec);
- if (page_rec_is_supremum(next_next_rec)) {
-
- goto split_at_new;
- }
+ if (page_header_get_ptr(page, PAGE_LAST_INSERT) != insert_point) {
+ return false;
+ }
- /* If there are >= 2 user records up from the insert
- point, split all but 1 off. We want to keep one because
- then sequential inserts can use the adaptive hash
- index, as they can do the necessary checks of the right
- search position just by looking at the records on this
- page. */
+ insert_point = page_rec_get_next(insert_point);
- *split_rec = next_next_rec;
+ if (page_rec_is_supremum(insert_point)) {
+ insert_point = NULL;
+ } else {
+ insert_point = page_rec_get_next(insert_point);
+ if (page_rec_is_supremum(insert_point)) {
+ insert_point = NULL;
}
- return(TRUE);
+ /* If there are >= 2 user records up from the insert
+ point, split all but 1 off. We want to keep one because
+ then sequential inserts can use the adaptive hash
+ index, as they can do the necessary checks of the right
+ search position just by looking at the records on this
+ page. */
}
- return(FALSE);
+ *split_rec = insert_point;
+ return true;
}
/*************************************************************//**
@@ -2837,30 +2825,20 @@ btr_page_split_and_insert(
buf_block_t* block;
page_t* page;
page_zip_des_t* page_zip;
- ulint page_no;
- byte direction;
- ulint hint_page_no;
buf_block_t* new_block;
page_t* new_page;
page_zip_des_t* new_page_zip;
rec_t* split_rec;
buf_block_t* left_block;
buf_block_t* right_block;
- buf_block_t* insert_block;
page_cur_t* page_cursor;
rec_t* first_rec;
byte* buf = 0; /* remove warning */
rec_t* move_limit;
- ibool insert_will_fit;
- ibool insert_left;
ulint n_iterations = 0;
- rec_t* rec;
ulint n_uniq;
- dict_index_t* index;
-
- index = btr_cur_get_index(cursor);
- if (dict_index_is_spatial(index)) {
+ if (cursor->index->is_spatial()) {
/* Split rtree page and update parent */
return(rtr_page_split_and_insert(flags, cursor, offsets, heap,
tuple, n_ext, mtr));
@@ -2891,23 +2869,19 @@ func_start:
ut_ad(!page_is_empty(page));
/* try to insert to the next page if possible before split */
- rec = btr_insert_into_right_sibling(
- flags, cursor, offsets, *heap, tuple, n_ext, mtr);
-
- if (rec != NULL) {
+ if (rec_t* rec = btr_insert_into_right_sibling(
+ flags, cursor, offsets, *heap, tuple, n_ext, mtr)) {
return(rec);
}
- page_no = block->page.id.page_no();
-
/* 1. Decide the split record; split_rec == NULL means that the
tuple to be inserted should be the first record on the upper
half-page */
- insert_left = FALSE;
+ bool insert_left = false;
+ ulint hint_page_no = block->page.id.page_no() + 1;
+ byte direction = FSP_UP;
- if (tuple != NULL && n_iterations > 0) {
- direction = FSP_UP;
- hint_page_no = page_no + 1;
+ if (tuple && n_iterations > 0) {
split_rec = btr_page_get_split_rec(cursor, tuple, n_ext);
if (split_rec == NULL) {
@@ -2915,17 +2889,10 @@ func_start:
cursor, tuple, offsets, n_uniq, heap);
}
} else if (btr_page_get_split_rec_to_right(cursor, &split_rec)) {
- direction = FSP_UP;
- hint_page_no = page_no + 1;
-
- } else if (btr_page_get_split_rec_to_left(cursor, &split_rec)) {
+ } else if ((split_rec = btr_page_get_split_rec_to_left(cursor))) {
direction = FSP_DOWN;
- hint_page_no = page_no - 1;
- ut_ad(split_rec);
+ hint_page_no -= 2;
} else {
- direction = FSP_UP;
- hint_page_no = page_no + 1;
-
/* If there is only one record in the index page, we
can't split the node in the middle by default. We need
to determine whether the new record will be inserted
@@ -2950,7 +2917,7 @@ func_start:
new_block = btr_page_alloc(cursor->index, hint_page_no, direction,
btr_page_get_level(page), mtr, mtr);
- if (new_block == NULL && os_has_said_disk_full) {
+ if (!new_block) {
return(NULL);
}
@@ -2975,12 +2942,8 @@ func_start:
*offsets = rec_get_offsets(split_rec, cursor->index, *offsets,
page_is_leaf(page), n_uniq, heap);
- if (tuple != NULL) {
- insert_left = cmp_dtuple_rec(
- tuple, split_rec, *offsets) < 0;
- } else {
- insert_left = 1;
- }
+ insert_left = !tuple
+ || cmp_dtuple_rec(tuple, split_rec, *offsets) < 0;
if (!insert_left && new_page_zip && n_iterations > 0) {
/* If a compressed page has already been split,
@@ -3015,10 +2978,10 @@ insert_empty:
on the appropriate half-page, we may release the tree x-latch.
We can then move the records after releasing the tree latch,
thus reducing the tree latch contention. */
+ bool insert_will_fit;
if (tuple == NULL) {
- insert_will_fit = 1;
- }
- else if (split_rec) {
+ insert_will_fit = true;
+ } else if (split_rec) {
insert_will_fit = !new_page_zip
&& btr_page_insert_fits(cursor, split_rec,
offsets, tuple, n_ext, heap);
@@ -3115,8 +3078,6 @@ insert_empty:
/* Update the lock table and possible hash index. */
lock_move_rec_list_end(new_block, block, move_limit);
- ut_ad(!dict_index_is_spatial(index));
-
btr_search_move_or_delete_hash_entries(
new_block, block);
@@ -3148,18 +3109,15 @@ insert_empty:
/* 6. The split and the tree modification is now completed. Decide the
page where the tuple should be inserted */
+ rec_t* rec;
+ buf_block_t* const insert_block = insert_left
+ ? left_block : right_block;
- if (tuple == NULL) {
+ if (UNIV_UNLIKELY(!tuple)) {
rec = NULL;
goto func_exit;
}
- if (insert_left) {
- insert_block = left_block;
- } else {
- insert_block = right_block;
- }
-
/* 7. Reposition the cursor for insert and try insertion */
page_cursor = btr_cur_get_page_cur(cursor);
@@ -3236,9 +3194,7 @@ func_exit:
ut_ad(page_validate(buf_block_get_frame(left_block), cursor->index));
ut_ad(page_validate(buf_block_get_frame(right_block), cursor->index));
- if (tuple == NULL) {
- ut_ad(rec == NULL);
- }
+ ut_ad(tuple || !rec);
ut_ad(!rec || rec_offs_validate(rec, cursor->index, *offsets));
return(rec);
}
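
The btr0btr.cc hunk above changes btr_page_get_split_rec_to_left() from an ibool result with a rec_t** out-parameter into a function that simply returns the split record, or NULL when no split is recommended; the btr0cur.cc hunk below then tests the call directly in a condition. A minimal standalone sketch of that calling-convention change follows; the struct and the find_old()/find_new() names are made up for illustration and are not InnoDB code.

#include <cstdio>

struct rec_t { int key; };              /* stand-in only; not the InnoDB rec_t */
static rec_t recs[] = {{1}, {2}, {3}};

/* Old convention: success flag returned, record handed back
through an out-parameter (as btr_page_get_split_rec_to_left did). */
static bool find_old(int key, rec_t** out)
{
	for (rec_t& r : recs) {
		if (r.key == key) {
			*out = &r;
			return true;
		}
	}
	*out = nullptr;
	return false;
}

/* New convention: the record itself (or nullptr) is the result,
so the call can be written straight into an if-initializer. */
static rec_t* find_new(int key)
{
	for (rec_t& r : recs) {
		if (r.key == key) {
			return &r;
		}
	}
	return nullptr;
}

int main()
{
	rec_t* r;
	if (find_old(2, &r)) {
		printf("old style: %d\n", r->key);
	}
	if (rec_t* s = find_new(2)) {
		printf("new style: %d\n", s->key);
	}
	return 0;
}
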
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 9d734e4a237..d3c830256f6 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -3460,7 +3460,7 @@ fail_err:
&& page_get_n_recs(page) >= 2
&& dict_index_get_space_reserve() + rec_size > max_size
&& (btr_page_get_split_rec_to_right(cursor, &dummy)
- || btr_page_get_split_rec_to_left(cursor, &dummy))) {
+ || btr_page_get_split_rec_to_left(cursor))) {
goto fail;
}
diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc
index 4c83e62dfe3..acd2a89e57c 100644
--- a/storage/innobase/buf/buf0dblwr.cc
+++ b/storage/innobase/buf/buf0dblwr.cc
@@ -325,7 +325,7 @@ too_small:
mtr_commit(&mtr);
/* Flush the modified pages to disk and make a checkpoint */
- log_make_checkpoint_at(LSN_MAX);
+ log_make_checkpoint();
/* Remove doublewrite pages from LRU */
buf_pool_invalidate();
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 32f6d016ff7..e39cf3e620b 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -18143,7 +18143,7 @@ checkpoint_now_set(THD*, st_mysql_sys_var*, void*, const void* save)
+ (log_sys.append_on_checkpoint != NULL
? log_sys.append_on_checkpoint->size() : 0)
< log_sys.lsn) {
- log_make_checkpoint_at(LSN_MAX);
+ log_make_checkpoint();
fil_flush_file_spaces(FIL_TYPE_LOG);
}
diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h
index e0fa2e734f8..283c2e0298a 100644
--- a/storage/innobase/include/btr0btr.h
+++ b/storage/innobase/include/btr0btr.h
@@ -452,30 +452,22 @@ btr_page_reorganize(
dict_index_t* index, /*!< in: the index tree of the page */
mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((nonnull));
-/*************************************************************//**
-Decides if the page should be split at the convergence point of
-inserts converging to left.
-@return TRUE if split recommended */
-ibool
-btr_page_get_split_rec_to_left(
-/*===========================*/
- btr_cur_t* cursor, /*!< in: cursor at which to insert */
- rec_t** split_rec)/*!< out: if split recommended,
- the first record on upper half page,
- or NULL if tuple should be first */
- MY_ATTRIBUTE((warn_unused_result));
-/*************************************************************//**
-Decides if the page should be split at the convergence point of
-inserts converging to right.
-@return TRUE if split recommended */
-ibool
-btr_page_get_split_rec_to_right(
-/*============================*/
- btr_cur_t* cursor, /*!< in: cursor at which to insert */
- rec_t** split_rec)/*!< out: if split recommended,
- the first record on upper half page,
- or NULL if tuple should be first */
- MY_ATTRIBUTE((warn_unused_result));
+/** Decide if the page should be split at the convergence point of inserts
+converging to the left.
+@param[in] cursor insert position
+@return the first record to be moved to the right half page
+@retval NULL if no split is recommended */
+rec_t* btr_page_get_split_rec_to_left(const btr_cur_t* cursor);
+/** Decide if the page should be split at the convergence point of inserts
+converging to the right.
+@param[in] cursor insert position
+@param[out] split_rec if split recommended, the first record
+ on the right half page, or
+ NULL if the to-be-inserted record
+ should be first
+@return whether split is recommended */
+bool
+btr_page_get_split_rec_to_right(const btr_cur_t* cursor, rec_t** split_rec);
/*************************************************************//**
Splits an index page to halves and inserts the tuple. It is assumed
diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h
index 9015d78bf67..4ab766d7bf0 100644
--- a/storage/innobase/include/log0log.h
+++ b/storage/innobase/include/log0log.h
@@ -192,15 +192,13 @@ log_buffer_sync_in_background(
/** Make a checkpoint. Note that this function does not flush dirty
blocks from the buffer pool: it only checks what is lsn of the oldest
modification in the pool, and writes information about the lsn in
-log files. Use log_make_checkpoint_at() to flush also the pool.
+log files. Use log_make_checkpoint() to flush also the pool.
@param[in] sync whether to wait for the write to complete
@return true if success, false if a checkpoint write was already running */
bool log_checkpoint(bool sync);
-/** Make a checkpoint at or after a specified LSN.
-@param[in] lsn the log sequence number, or LSN_MAX
-for the latest LSN */
-void log_make_checkpoint_at(lsn_t lsn);
+/** Make a checkpoint */
+void log_make_checkpoint();
/****************************************************************//**
Makes a checkpoint at the latest lsn and writes it to first page of each
diff --git a/storage/innobase/include/row0ins.h b/storage/innobase/include/row0ins.h
index ddc3db8c694..87a72d88eb6 100644
--- a/storage/innobase/include/row0ins.h
+++ b/storage/innobase/include/row0ins.h
@@ -145,7 +145,10 @@ row_ins_sec_index_entry(
/*====================*/
dict_index_t* index, /*!< in: secondary index */
dtuple_t* entry, /*!< in/out: index entry to insert */
- que_thr_t* thr) /*!< in: query thread */
+ que_thr_t* thr, /*!< in: query thread */
+ bool check_ref) /*!< in: TRUE if we want to check that
+ the referenced table is ok, FALSE if we
+ want to check the foreign key table */
MY_ATTRIBUTE((warn_unused_result));
/***********************************************************//**
Inserts a row to a table. This is a high-level function used in
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index 851e6081f9c..9ae0f9a314c 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -629,11 +629,11 @@ do { \
#ifdef HAVE_PSI_STAGE_INTERFACE
/** Performance schema stage event for monitoring ALTER TABLE progress
-everything after flush log_make_checkpoint_at(). */
+everything after flush log_make_checkpoint(). */
extern PSI_stage_info srv_stage_alter_table_end;
/** Performance schema stage event for monitoring ALTER TABLE progress
-log_make_checkpoint_at(). */
+log_make_checkpoint(). */
extern PSI_stage_info srv_stage_alter_table_flush;
/** Performance schema stage event for monitoring ALTER TABLE progress
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index 0e794a865d1..f3052bc9e2e 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -1340,7 +1340,7 @@ log_append_on_checkpoint(
/** Make a checkpoint. Note that this function does not flush dirty
blocks from the buffer pool: it only checks what is lsn of the oldest
modification in the pool, and writes information about the lsn in
-log files. Use log_make_checkpoint_at() to flush also the pool.
+log files. Use log_make_checkpoint() to flush also the pool.
@param[in] sync whether to wait for the write to complete
@return true if success, false if a checkpoint write was already running */
bool log_checkpoint(bool sync)
@@ -1456,14 +1456,12 @@ bool log_checkpoint(bool sync)
return(true);
}
-/** Make a checkpoint at or after a specified LSN.
-@param[in] lsn the log sequence number, or LSN_MAX
-for the latest LSN */
-void log_make_checkpoint_at(lsn_t lsn)
+/** Make a checkpoint */
+void log_make_checkpoint()
{
/* Preflush pages synchronously */
- while (!log_preflush_pool_modified_pages(lsn)) {
+ while (!log_preflush_pool_modified_pages(LSN_MAX)) {
/* Flush as much as we can */
}
@@ -1796,7 +1794,7 @@ wait_suspend_loop:
if (!srv_read_only_mode) {
service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL,
"ensuring dirty buffer pool are written to log");
- log_make_checkpoint_at(LSN_MAX);
+ log_make_checkpoint();
log_mutex_enter();
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index a40372a2ed9..36dfabb7c49 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -583,20 +583,20 @@ page_copy_rec_list_end_no_locks(
/* Copy records from the original page to the new page */
while (!page_cur_is_after_last(&cur1)) {
- rec_t* cur1_rec = page_cur_get_rec(&cur1);
rec_t* ins_rec;
- offsets = rec_get_offsets(cur1_rec, index, offsets, is_leaf,
+ offsets = rec_get_offsets(cur1.rec, index, offsets, is_leaf,
ULINT_UNDEFINED, &heap);
ins_rec = page_cur_insert_rec_low(cur2, index,
- cur1_rec, offsets, mtr);
+ cur1.rec, offsets, mtr);
if (UNIV_UNLIKELY(!ins_rec)) {
ib::fatal() << "Rec offset " << page_offset(rec)
- << ", cur1 offset "
- << page_offset(page_cur_get_rec(&cur1))
+ << ", cur1 offset " << page_offset(cur1.rec)
<< ", cur2 offset " << page_offset(cur2);
}
page_cur_move_to_next(&cur1);
+ ut_ad(!(rec_get_info_bits(cur1.rec, page_is_comp(new_page))
+ & REC_INFO_MIN_REC_FLAG));
cur2 = ins_rec;
}
@@ -780,6 +780,8 @@ page_copy_rec_list_start(
dict_index_t* index, /*!< in: record descriptor */
mtr_t* mtr) /*!< in: mtr */
{
+ ut_ad(page_align(rec) == block->frame);
+
page_t* new_page = buf_block_get_frame(new_block);
page_zip_des_t* new_page_zip = buf_block_get_page_zip(new_block);
page_cur_t cur1;
@@ -797,7 +799,6 @@ page_copy_rec_list_start(
predefined infimum record. */
if (page_rec_is_infimum(rec)) {
-
return(ret);
}
@@ -815,7 +816,8 @@ page_copy_rec_list_start(
const bool is_leaf = page_rec_is_leaf(rec);
/* Copy records from the original page to the new page */
- if (dict_index_is_spatial(index)) {
+ if (index->is_spatial()) {
+ ut_ad(!index->is_instant());
ulint max_to_move = page_get_n_recs(
buf_block_get_frame(block));
heap = mem_heap_create(256);
@@ -831,17 +833,18 @@ page_copy_rec_list_start(
rec_move, max_to_move,
&num_moved, mtr);
} else {
-
while (page_cur_get_rec(&cur1) != rec) {
- rec_t* cur1_rec = page_cur_get_rec(&cur1);
- offsets = rec_get_offsets(cur1_rec, index, offsets,
+ offsets = rec_get_offsets(cur1.rec, index, offsets,
is_leaf,
ULINT_UNDEFINED, &heap);
cur2 = page_cur_insert_rec_low(cur2, index,
- cur1_rec, offsets, mtr);
+ cur1.rec, offsets, mtr);
ut_a(cur2);
page_cur_move_to_next(&cur1);
+ ut_ad(!(rec_get_info_bits(cur1.rec,
+ page_is_comp(new_page))
+ & REC_INFO_MIN_REC_FLAG));
}
}
@@ -1233,6 +1236,7 @@ page_delete_rec_list_start(
rec_offs_init(offsets_);
+ ut_ad(page_align(rec) == block->frame);
ut_ad((ibool) !!page_rec_is_comp(rec)
== dict_table_is_comp(index->table));
#ifdef UNIV_ZIP_DEBUG
@@ -2144,7 +2148,17 @@ page_simple_validate_old(
goto func_exit;
}
- rec = page_rec_get_next_const(rec);
+ ulint offs = rec_get_next_offs(rec, FALSE);
+ if (!offs) {
+ break;
+ }
+ if (UNIV_UNLIKELY(offs < PAGE_OLD_INFIMUM
+ || offs >= srv_page_size)) {
+ ib::error() << "Page free list is corrupted " << count;
+ goto func_exit;
+ }
+
+ rec = page + offs;
}
if (UNIV_UNLIKELY(page_dir_get_n_heap(page) != count + 1)) {
@@ -2336,7 +2350,17 @@ page_simple_validate_new(
goto func_exit;
}
- rec = page_rec_get_next_const(rec);
+ const ulint offs = rec_get_next_offs(rec, TRUE);
+ if (!offs) {
+ break;
+ }
+ if (UNIV_UNLIKELY(offs < PAGE_OLD_INFIMUM
+ || offs >= srv_page_size)) {
+ ib::error() << "Page free list is corrupted " << count;
+ goto func_exit;
+ }
+
+ rec = page + offs;
}
if (UNIV_UNLIKELY(page_dir_get_n_heap(page) != count + 1)) {
@@ -2491,7 +2515,7 @@ wrong_page_type:
& REC_INFO_MIN_REC_FLAG)) {
if (page_has_prev(page)) {
ib::error() << "REC_INFO_MIN_REC_FLAG "
- "is set in on non-left page";
+ "is set on non-left page";
ret = false;
} else if (!page_is_leaf(page)) {
/* leftmost node pointer page */
@@ -2673,14 +2697,28 @@ n_owned_zero:
}
/* Check then the free list */
- for (rec = page_header_get_ptr(page, PAGE_FREE);
- rec;
- rec = page_rec_get_next_const(rec)) {
+ rec = page_header_get_ptr(page, PAGE_FREE);
+
+ while (rec != NULL) {
offsets = rec_get_offsets(rec, index, offsets,
page_is_leaf(page),
ULINT_UNDEFINED, &heap);
if (UNIV_UNLIKELY(!page_rec_validate(rec, offsets))) {
ret = FALSE;
+next_free:
+ const ulint offs = rec_get_next_offs(
+ rec, page_is_comp(page));
+ if (!offs) {
+ break;
+ }
+ if (UNIV_UNLIKELY(offs < PAGE_OLD_INFIMUM
+ || offs >= srv_page_size)) {
+ ib::error() << "Page free list is corrupted";
+ ret = FALSE;
+ break;
+ }
+
+ rec = page + offs;
continue;
}
@@ -2691,7 +2729,7 @@ n_owned_zero:
ib::error() << "Free record offset out of bounds: "
<< offs << '+' << i;
ret = FALSE;
- continue;
+ goto next_free;
}
while (i--) {
if (UNIV_UNLIKELY(buf[offs + i])) {
@@ -2702,6 +2740,8 @@ n_owned_zero:
}
buf[offs + i] = 1;
}
+
+ goto next_free;
}
if (UNIV_UNLIKELY(page_dir_get_n_heap(page) != count + 1)) {
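
The page0page.cc changes above replace the blind page_rec_get_next_const() walk over the PAGE_FREE list with an explicit rec_get_next_offs() fetch whose value is range-checked before it is followed, so a corrupted free list is reported ("Page free list is corrupted") instead of being chased out of the page. A self-contained sketch of that pattern, with assumed constants and a hypothetical two-byte next-offset field (none of these values are the real InnoDB layout):

#include <cstdint>
#include <cstdio>
#include <cstring>

static const size_t PAGE_BYTES = 256;  /* stand-in for srv_page_size */
static const size_t MIN_OFFSET = 8;    /* stand-in for the infimum offset */

/* Each "record" stores the offset of the next free record in its first
two bytes; 0 terminates the list (same idea as rec_get_next_offs()). */
static uint16_t next_offs(const uint8_t* page, size_t offs)
{
	uint16_t next;
	memcpy(&next, page + offs, 2);
	return next;
}

/* Walk the free list, refusing to follow an offset that leaves the page. */
static bool validate_free_list(const uint8_t* page, size_t first)
{
	size_t count = 0;
	for (size_t offs = first; offs != 0; ) {
		if (offs < MIN_OFFSET || offs >= PAGE_BYTES) {
			printf("Page free list is corrupted at %zu\n", count);
			return false;
		}
		count++;
		offs = next_offs(page, offs);
	}
	return true;
}

int main()
{
	uint8_t page[PAGE_BYTES] = {};
	/* free list: 16 -> 48 -> end */
	uint16_t a = 48, b = 0;
	memcpy(page + 16, &a, 2);
	memcpy(page + 48, &b, 2);
	printf("intact: %d\n", validate_free_list(page, 16));

	/* corrupt the link so it points outside the page */
	uint16_t bad = 9999;
	memcpy(page + 16, &bad, 2);
	printf("corrupted: %d\n", validate_free_list(page, 16));
	return 0;
}
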
diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc
index e92d058b6d4..043a71733a5 100644
--- a/storage/innobase/rem/rem0rec.cc
+++ b/storage/innobase/rem/rem0rec.cc
@@ -646,6 +646,7 @@ rec_init_offsets(
break;
case REC_STATUS_INSTANT:
ut_ad(leaf);
+ ut_ad(index->is_instant());
rec_init_offsets_comp_ordinary(rec, index, offsets,
index->n_core_fields,
NULL,
@@ -787,6 +788,8 @@ resolved:
}
if (i < rec_offs_n_fields(offsets)) {
+ ut_ad(index->is_instant());
+
offs = (rec_offs_base(offsets)[i] & REC_OFFS_MASK)
| REC_OFFS_DEFAULT;
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index a45d9f59caf..9d48d90c962 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -2124,7 +2124,7 @@ row_import_cleanup(
DBUG_EXECUTE_IF("ib_import_before_checkpoint_crash", DBUG_SUICIDE(););
- log_make_checkpoint_at(LSN_MAX);
+ log_make_checkpoint();
return(err);
}
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index bb654f08cdb..5bb299193ed 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -1972,7 +1972,10 @@ row_ins_check_foreign_constraints(
dict_index_t* index, /*!< in: index */
bool pk, /*!< in: index->is_primary() */
dtuple_t* entry, /*!< in: index entry for index */
- que_thr_t* thr) /*!< in: query thread */
+ que_thr_t* thr, /*!< in: query thread */
+ bool check_ref = true) /*!< in: TRUE if we want to check that
+ the referenced table is ok, FALSE if we
+ want to check the foreign key table */
{
dict_foreign_t* foreign;
dberr_t err;
@@ -2021,7 +2024,7 @@ row_ins_check_foreign_constraints(
table from being dropped while the check is running. */
err = row_ins_check_foreign_constraint(
- TRUE, foreign, table, entry, thr);
+ check_ref, foreign, table, entry, thr);
if (referenced_table) {
foreign->foreign_table->dec_fk_checks();
@@ -3266,7 +3269,10 @@ row_ins_sec_index_entry(
/*====================*/
dict_index_t* index, /*!< in: secondary index */
dtuple_t* entry, /*!< in/out: index entry to insert */
- que_thr_t* thr) /*!< in: query thread */
+ que_thr_t* thr, /*!< in: query thread */
+ bool check_ref) /*!< in: true if we want to check that
+ the referenced table is ok, false if we
+ want to check the foreign key table */
{
dberr_t err;
mem_heap_t* offsets_heap;
@@ -3279,7 +3285,8 @@ row_ins_sec_index_entry(
if (!index->table->foreign_set.empty()) {
err = row_ins_check_foreign_constraints(index->table, index,
- false, entry, thr);
+ false, entry, thr,
+ check_ref);
if (err != DB_SUCCESS) {
return(err);
@@ -3354,7 +3361,7 @@ row_ins_index_entry(
if (index->is_primary()) {
return row_ins_clust_index_entry(index, entry, thr, 0);
} else {
- return row_ins_sec_index_entry(index, entry, thr);
+ return(row_ins_sec_index_entry(index, entry, thr, true));
}
}
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index db39b486d0e..88d08f13c25 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -2509,7 +2509,8 @@ row_upd_sec_index_entry(
ut_a(entry);
/* Insert new index entry */
- err = row_ins_sec_index_entry(index, entry, thr);
+ err = row_ins_sec_index_entry(index, entry, thr,
+ node->is_delete != VERSIONED_DELETE);
func_exit:
mem_heap_free(heap);
@@ -3182,9 +3183,8 @@ row_upd_clust_step(
row_upd_eval_new_vals(node->update);
}
- if (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE) {
+ if (!node->is_delete && node->cmpl_info & UPD_NODE_NO_ORD_CHANGE) {
- node->index = NULL;
err = row_upd_clust_rec(
flags, node, index, offsets, &heap, thr, &mtr);
goto exit_func;
@@ -3228,7 +3228,10 @@ row_upd_clust_step(
goto exit_func;
}
- node->state = UPD_NODE_UPDATE_SOME_SEC;
+ ut_ad(node->is_delete != PLAIN_DELETE);
+ node->state = node->is_delete ?
+ UPD_NODE_UPDATE_ALL_SEC :
+ UPD_NODE_UPDATE_SOME_SEC;
}
node->index = dict_table_get_next_index(index);
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index 6df284b1f15..54e822cbf66 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -640,12 +640,12 @@ static const ulint SRV_MASTER_SLOT = 0;
#ifdef HAVE_PSI_STAGE_INTERFACE
/** Performance schema stage event for monitoring ALTER TABLE progress
-everything after flush log_make_checkpoint_at(). */
+everything after flush log_make_checkpoint(). */
PSI_stage_info srv_stage_alter_table_end
= {0, "alter table (end)", PSI_FLAG_STAGE_PROGRESS};
/** Performance schema stage event for monitoring ALTER TABLE progress
-log_make_checkpoint_at(). */
+log_make_checkpoint(). */
PSI_stage_info srv_stage_alter_table_flush
= {0, "alter table (flush)", PSI_FLAG_STAGE_PROGRESS};
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index cc21f3c1fa4..29707e783af 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -508,7 +508,7 @@ create_log_files(
(log_sys.lsn - log_sys.last_checkpoint_lsn));
log_mutex_exit();
- log_make_checkpoint_at(LSN_MAX);
+ log_make_checkpoint();
return(DB_SUCCESS);
}
diff --git a/storage/mroonga/ha_mroonga.hpp b/storage/mroonga/ha_mroonga.hpp
index 348b5bdc1b3..20626742bb7 100644
--- a/storage/mroonga/ha_mroonga.hpp
+++ b/storage/mroonga/ha_mroonga.hpp
@@ -571,7 +571,12 @@ public:
void set_next_insert_id(ulonglong id);
void get_auto_increment(ulonglong offset, ulonglong increment, ulonglong nb_desired_values,
ulonglong *first_value, ulonglong *nb_reserved_values) mrn_override;
- void restore_auto_increment(ulonglong prev_insert_id);
+ /** Fix spurious -Werror=overloaded-virtual in GCC 9 */
+ void restore_auto_increment() mrn_override
+ {
+ handler::restore_auto_increment();
+ }
+ void restore_auto_increment(ulonglong prev_insert_id) mrn_override;
void release_auto_increment() mrn_override;
int check_for_upgrade(HA_CHECK_OPT *check_opt) mrn_override;
#ifdef MRN_HANDLER_HAVE_RESET_AUTO_INCREMENT
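
The ha_mroonga.hpp hunk addresses GCC 9's -Werror=overloaded-virtual: overriding only restore_auto_increment(ulonglong) hides the inherited zero-argument handler::restore_auto_increment(), so a forwarding override for the hidden signature is added. A minimal sketch of the same hiding problem and fix, with hypothetical Handler/MyHandler classes rather than the real server classes:

#include <cstdio>

struct Handler {
	virtual ~Handler() {}
	virtual void restore_auto_increment() { printf("base()\n"); }
	virtual void restore_auto_increment(unsigned long long id)
	{ printf("base(%llu)\n", id); }
};

struct MyHandler : Handler {
	/* Overriding only the one-argument form would hide the inherited
	zero-argument overload and trigger -Woverloaded-virtual; the
	forwarding override below keeps both callable, mirroring the
	ha_mroonga fix. */
	void restore_auto_increment() override
	{
		Handler::restore_auto_increment();
	}
	void restore_auto_increment(unsigned long long id) override
	{ printf("derived(%llu)\n", id); }
};

int main()
{
	MyHandler h;
	h.restore_auto_increment();     /* not hidden, thanks to the override */
	h.restore_auto_increment(42);   /* prints derived(42) */
	return 0;
}
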