author     Marko Mäkelä <marko.makela@mariadb.com>   2020-02-11 18:44:26 +0200
committer  Marko Mäkelä <marko.makela@mariadb.com>   2020-02-11 18:44:26 +0200
commit     fc2f2fa85306d80c317e76933264ba3d3675ed1c (patch)
tree       5f2beb7d399fa620968cb91687697e64eee48aab /storage/innobase/btr
parent     8ccb3caafb7cba0fca12e89c5c9b67a740364fdd (diff)
download   mariadb-git-fc2f2fa85306d80c317e76933264ba3d3675ed1c.tar.gz
MDEV-19747: Deprecate and ignore innodb_log_optimize_ddl
During native table rebuild or index creation, InnoDB used to skip redo logging and write MLOG_INDEX_LOAD records to inform crash recovery and Mariabackup of the gaps in the redo log. This is fragile and prohibits some optimizations, such as skipping the doublewrite buffer for newly (re)initialized pages (MDEV-19738).

row_merge_write_redo(): Remove. We do not write MLOG_INDEX_LOAD records any more; instead, we write full redo log.

FlushObserver: Remove.

fseg_free_page_func(): Remove the parameter log. Redo logging cannot be disabled.

fil_space_t::redo_skipped_count: Remove.

We cannot remove buf_block_t::skip_flush_check, because PageBulk will temporarily generate invalid B-tree pages in the buffer pool.
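For illustration, the gist of the btr0bulk.cc hunks below is that the bulk-load mini-transaction no longer switches to MTR_LOG_NO_REDO when a FlushObserver is attached; it always generates full redo log. The following is a simplified C++ sketch of the before/after control flow, not the literal InnoDB code (the wrapper function names are hypothetical; only calls visible in the diff are used):

/* Before this commit: PageBulk::init()/latch() could start the
   mini-transaction with redo logging disabled whenever a FlushObserver
   had been attached (the innodb_log_optimize_ddl path). */
static void start_bulk_mtr_old(mtr_t& mtr, dict_index_t* index,
			       FlushObserver* flush_observer)
{
	mtr.start();
	if (flush_observer) {
		/* Skip redo logging; crash recovery and Mariabackup
		relied on MLOG_INDEX_LOAD records to learn about the gap. */
		mtr.set_log_mode(MTR_LOG_NO_REDO);
		mtr.set_flush_observer(flush_observer);
	} else {
		index->set_modified(mtr);
	}
}

/* After this commit: redo logging is always enabled, so the observer
   and the MLOG_INDEX_LOAD bookkeeping disappear. */
static void start_bulk_mtr_new(mtr_t& mtr, dict_index_t* index)
{
	mtr.start();
	index->set_modified(mtr);
}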
Diffstat (limited to 'storage/innobase/btr')
-rw-r--r--  storage/innobase/btr/btr0btr.cc   |  4
-rw-r--r--  storage/innobase/btr/btr0bulk.cc  | 41
-rw-r--r--  storage/innobase/btr/btr0cur.cc   |  3
3 files changed, 11 insertions(+), 37 deletions(-)
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 753ed9b077c..88af5003d25 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -2,7 +2,7 @@
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2014, 2019, MariaDB Corporation.
+Copyright (c) 2014, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -747,7 +747,7 @@ void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
: PAGE_HEADER + PAGE_BTR_SEG_TOP];
fseg_free_page(seg_header,
index->table->space, block->page.id.page_no(),
- block->index != NULL, !block->page.flush_observer, mtr);
+ block->index != NULL, mtr);
/* The page was marked free in the allocation bitmap, but it
should remain exclusively latched until mtr_t::commit() or until it
diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc
index 51a4657a1c1..d0bccd11688 100644
--- a/storage/innobase/btr/btr0bulk.cc
+++ b/storage/innobase/btr/btr0bulk.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2014, 2019, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -33,8 +33,6 @@ Created 03/11/2014 Shaohua Wang
/** Innodb B-tree index fill factor for bulk load. */
uint innobase_fill_factor;
-/** whether to reduce redo logging during ALTER TABLE */
-my_bool innodb_log_optimize_ddl;
/** Initialize members, allocate page if needed and start mtr.
Note: we commit all mtrs on failure.
@@ -51,13 +49,7 @@ PageBulk::init()
m_heap = mem_heap_create(1000);
m_mtr.start();
-
- if (m_flush_observer) {
- m_mtr.set_log_mode(MTR_LOG_NO_REDO);
- m_mtr.set_flush_observer(m_flush_observer);
- } else {
- m_index->set_modified(m_mtr);
- }
+ m_index->set_modified(m_mtr);
if (m_page_no == FIL_NULL) {
mtr_t alloc_mtr;
@@ -233,7 +225,7 @@ inline void PageBulk::insertPage(const rec_t *rec, offset_t *offsets)
m_heap_top += rec_size;
m_rec_no += 1;
- if (!m_flush_observer && !m_page_zip) {
+ if (!m_page_zip) {
/* For ROW_FORMAT=COMPRESSED, redo log may be written
in PageBulk::compress(). */
page_cur_insert_rec_write_log(insert_rec, rec_size,
@@ -322,7 +314,7 @@ inline void PageBulk::finishPage()
ut_ad(!page_get_instant(m_page));
ut_ad(!mach_read_from_2(PAGE_HEADER + PAGE_N_DIRECTION + m_page));
- if (fmt != COMPRESSED && !m_flush_observer) {
+ if (fmt != COMPRESSED) {
m_mtr.write<2,mtr_t::OPT>(*m_block,
PAGE_HEADER + PAGE_N_DIR_SLOTS
+ m_page, 2 + slot_index);
@@ -671,13 +663,7 @@ dberr_t
PageBulk::latch()
{
m_mtr.start();
-
- if (m_flush_observer) {
- m_mtr.set_log_mode(MTR_LOG_NO_REDO);
- m_mtr.set_flush_observer(m_flush_observer);
- } else {
- m_index->set_modified(m_mtr);
- }
+ m_index->set_modified(m_mtr);
/* In case the block is S-latched by page_cleaner. */
if (!buf_page_optimistic_get(RW_X_LATCH, m_block, m_modify_clock,
@@ -720,7 +706,7 @@ BtrBulk::pageSplit(
/* 2. create a new page. */
PageBulk new_page_bulk(m_index, m_trx->id, FIL_NULL,
- page_bulk->getLevel(), m_flush_observer);
+ page_bulk->getLevel());
dberr_t err = new_page_bulk.init();
if (err != DB_SUCCESS) {
return(err);
@@ -855,7 +841,7 @@ BtrBulk::insert(
if (level + 1 > m_page_bulks.size()) {
PageBulk* new_page_bulk
= UT_NEW_NOKEY(PageBulk(m_index, m_trx->id, FIL_NULL,
- level, m_flush_observer));
+ level));
err = new_page_bulk->init();
if (err != DB_SUCCESS) {
UT_DELETE(new_page_bulk);
@@ -909,8 +895,7 @@ BtrBulk::insert(
/* Create a sibling page_bulk. */
PageBulk* sibling_page_bulk;
sibling_page_bulk = UT_NEW_NOKEY(PageBulk(m_index, m_trx->id,
- FIL_NULL, level,
- m_flush_observer));
+ FIL_NULL, level));
err = sibling_page_bulk->init();
if (err != DB_SUCCESS) {
UT_DELETE(sibling_page_bulk);
@@ -935,10 +920,6 @@ BtrBulk::insert(
/* Important: log_free_check whether we need a checkpoint. */
if (page_is_leaf(sibling_page_bulk->getPage())) {
if (trx_is_interrupted(m_trx)) {
- if (m_flush_observer) {
- m_flush_observer->interrupted();
- }
-
err = DB_INTERRUPTED;
goto func_exit;
}
@@ -1032,8 +1013,7 @@ BtrBulk::finish(dberr_t err)
mtr_t mtr;
buf_block_t* last_block;
PageBulk root_page_bulk(m_index, m_trx->id,
- m_index->page, m_root_level,
- m_flush_observer);
+ m_index->page, m_root_level);
mtr.start();
m_index->set_modified(mtr);
@@ -1057,9 +1037,6 @@ BtrBulk::finish(dberr_t err)
/* Remove last page. */
btr_page_free(m_index, last_block, &mtr);
- /* Do not flush the last page. */
- last_block->page.flush_observer = NULL;
-
mtr.commit();
err = pageCommit(&root_page_bulk, NULL, false);
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 3a52011cf03..2842600cdf6 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -7347,7 +7347,6 @@ struct btr_blob_log_check_t {
dict_index_t* index = m_pcur->index();
ulint offs = 0;
ulint page_no = ULINT_UNDEFINED;
- FlushObserver* observer = m_mtr->get_flush_observer();
if (UNIV_UNLIKELY(m_op == BTR_STORE_INSERT_BULK)) {
offs = page_offset(*m_rec);
@@ -7368,7 +7367,6 @@ struct btr_blob_log_check_t {
m_mtr->start();
m_mtr->set_log_mode(log_mode);
index->set_modified(*m_mtr);
- m_mtr->set_flush_observer(observer);
if (UNIV_UNLIKELY(m_op == BTR_STORE_INSERT_BULK)) {
m_pcur->btr_cur.page_cur.block = btr_block_get(
@@ -7563,7 +7561,6 @@ btr_store_big_rec_extern_fields(
mtr.start();
index->set_modified(mtr);
mtr.set_log_mode(btr_mtr->get_log_mode());
- mtr.set_flush_observer(btr_mtr->get_flush_observer());
buf_page_get(rec_block->page.id,
rec_block->zip_size(), RW_X_LATCH, &mtr);