author     Marko Mäkelä <marko.makela@mariadb.com>  2019-12-03 10:19:45 +0200
committer  Marko Mäkelä <marko.makela@mariadb.com>  2019-12-03 11:05:18 +0200
commit     56f6dab1d0e5a464ea49c1e5efb0032a0f5cea3e (patch)
tree       e4c57ce4c3235cf512f5cf74e8031b9d041e510c
parent     504823bcce5926bd5a20b8b8f202ed479ff6d750 (diff)
download   mariadb-git-56f6dab1d0e5a464ea49c1e5efb0032a0f5cea3e.tar.gz
MDEV-21174: Replace mlog_write_ulint() with mtr_t::write()
mtr_t::write(): Replaces mlog_write_ulint() and mlog_write_ull().
Writes are optimized away if the page contents do not change,
except when a dummy write has been explicitly requested.

Because the member function template takes a block descriptor as a
parameter, it is possible to introduce better consistency checks.
Due to this, the code for handling file-based lists, undo logs
and user transactions was refactored to pass around buf_block_t.
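For readers unfamiliar with the new interface, the sketch below illustrates the behaviour described above: an N-byte write that is skipped (producing no redo log record) when the destination bytes already hold the value, unless a forced "dummy" write is requested, and that takes the block descriptor so the target can be checked against the page frame. This is a simplified, self-contained illustration only; the names block_sketch, write_mode and write_sketch are hypothetical stand-ins, not the actual InnoDB declarations, which this patch adds to the mtr0* headers listed in the diffstat below.

#include <cassert>
#include <cstdint>
#include <cstring>

/* Hypothetical stand-in for buf_block_t: just a page frame. */
struct block_sketch { uint8_t frame[16384]; };

/* Hypothetical write modes mirroring the patch's OPT and FORCED flavours. */
enum write_mode { NORMAL_MODE, OPT_MODE, FORCED_MODE };

/* Write an N-byte big-endian value at ptr inside block. With OPT_MODE the
   write (and its redo log record) is skipped when nothing would change;
   FORCED_MODE always writes, matching the explicit "dummy write" case. */
template<unsigned N, write_mode W = NORMAL_MODE>
void write_sketch(block_sketch &block, uint8_t *ptr, uint64_t val)
{
  /* The block parameter enables a consistency check that the target
     bytes really belong to this page frame. */
  assert(ptr >= block.frame && ptr + N <= block.frame + sizeof block.frame);

  uint8_t buf[N];
  for (unsigned i = 0; i < N; i++)
    buf[i] = uint8_t(val >> 8 * (N - 1 - i));

  if (W == OPT_MODE && !std::memcmp(ptr, buf, N))
    return;            /* contents unchanged: optimize the write away */

  std::memcpy(ptr, buf, N);
  /* ...the real mtr_t::write() would append a redo log record here... */
}

In the diff below, calls such as mtr->write<2,mtr_t::OPT>(*block, PAGE_HEADER + PAGE_LEVEL + block->frame, level) follow this pattern, replacing the former unconditional mlog_write_ulint(..., MLOG_2BYTES, mtr) calls.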
-rw-r--r--  storage/innobase/btr/btr0btr.cc              561
-rw-r--r--  storage/innobase/btr/btr0bulk.cc             142
-rw-r--r--  storage/innobase/btr/btr0cur.cc              334
-rw-r--r--  storage/innobase/buf/buf0dblwr.cc            123
-rw-r--r--  storage/innobase/dict/dict0boot.cc           103
-rw-r--r--  storage/innobase/dict/dict0crea.cc            25
-rw-r--r--  storage/innobase/dict/dict0dict.cc             9
-rw-r--r--  storage/innobase/dict/dict0load.cc             3
-rw-r--r--  storage/innobase/fil/fil0crypt.cc              5
-rw-r--r--  storage/innobase/fil/fil0fil.cc                4
-rw-r--r--  storage/innobase/fsp/fsp0fsp.cc             1010
-rw-r--r--  storage/innobase/fut/fut0lst.cc              689
-rw-r--r--  storage/innobase/gis/gis0rtree.cc             35
-rw-r--r--  storage/innobase/handler/ha_innodb.cc         12
-rw-r--r--  storage/innobase/handler/handler0alter.cc      6
-rw-r--r--  storage/innobase/ibuf/ibuf0ibuf.cc           406
-rw-r--r--  storage/innobase/include/btr0btr.h             2
-rw-r--r--  storage/innobase/include/btr0btr.ic          129
-rw-r--r--  storage/innobase/include/btr0bulk.h           20
-rw-r--r--  storage/innobase/include/btr0cur.h             9
-rw-r--r--  storage/innobase/include/buf0buf.h            14
-rw-r--r--  storage/innobase/include/buf0buf.ic           19
-rw-r--r--  storage/innobase/include/dict0boot.h          13
-rw-r--r--  storage/innobase/include/fsp0fsp.h             9
-rw-r--r--  storage/innobase/include/fut0lst.h           158
-rw-r--r--  storage/innobase/include/fut0lst.ic           80
-rw-r--r--  storage/innobase/include/mtr0log.h            82
-rw-r--r--  storage/innobase/include/mtr0log.ic           24
-rw-r--r--  storage/innobase/include/mtr0mtr.h            43
-rw-r--r--  storage/innobase/include/mtr0types.h           2
-rw-r--r--  storage/innobase/include/page0page.h          57
-rw-r--r--  storage/innobase/include/page0page.ic         76
-rw-r--r--  storage/innobase/include/trx0purge.h          10
-rw-r--r--  storage/innobase/include/trx0rseg.h           47
-rw-r--r--  storage/innobase/include/trx0rseg.ic          61
-rw-r--r--  storage/innobase/include/trx0types.h           2
-rw-r--r--  storage/innobase/include/trx0undo.h          146
-rw-r--r--  storage/innobase/include/trx0undo.ic         120
-rw-r--r--  storage/innobase/log/log0log.cc                2
-rw-r--r--  storage/innobase/log/log0recv.cc               2
-rw-r--r--  storage/innobase/mtr/mtr0log.cc              120
-rw-r--r--  storage/innobase/page/page0page.cc           133
-rw-r--r--  storage/innobase/row/row0purge.cc             11
-rw-r--r--  storage/innobase/row/row0uins.cc              22
-rw-r--r--  storage/innobase/row/row0undo.cc              35
-rw-r--r--  storage/innobase/row/row0upd.cc                2
-rw-r--r--  storage/innobase/srv/srv0start.cc              6
-rw-r--r--  storage/innobase/trx/trx0purge.cc            192
-rw-r--r--  storage/innobase/trx/trx0rec.cc               70
-rw-r--r--  storage/innobase/trx/trx0rseg.cc             205
-rw-r--r--  storage/innobase/trx/trx0sys.cc               34
-rw-r--r--  storage/innobase/trx/trx0trx.cc               18
-rw-r--r--  storage/innobase/trx/trx0undo.cc             870
53 files changed, 2758 insertions(+), 3554 deletions(-)
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index dea97787a88..4fb0fe6cd4c 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -268,8 +268,7 @@ btr_root_get(
/* Intended to be used for segment list access.
SX lock doesn't block reading user data by other threads.
And block the segment list access by others.*/
- buf_block_t* root = btr_root_block_get(index, RW_SX_LATCH,
- mtr);
+ buf_block_t* root = btr_root_block_get(index, RW_SX_LATCH, mtr);
return(root ? buf_block_get_frame(root) : NULL);
}
@@ -442,22 +441,25 @@ btr_page_create(
page_t* page = buf_block_get_frame(block);
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
+ byte *index_id= &page[PAGE_HEADER + PAGE_INDEX_ID];
- if (page_zip) {
+ if (UNIV_LIKELY_NULL(page_zip)) {
page_create_zip(block, index, level, 0, mtr);
+ mach_write_to_8(index_id, index->id);
+ page_zip_write_header(page_zip, index_id, 8, mtr);
} else {
page_create(block, mtr, dict_table_is_comp(index->table),
dict_index_is_spatial(index));
/* Set the level of the new index page */
- btr_page_set_level(page, NULL, level, mtr);
+ mtr->write<2,mtr_t::OPT>(*block, PAGE_HEADER + PAGE_LEVEL
+ + block->frame, level);
+ mtr->write<8,mtr_t::OPT>(*block, index_id, index->id);
}
/* For Spatial Index, initialize the Split Sequence Number */
if (dict_index_is_spatial(index)) {
page_set_ssn_id(block, page_zip, 0, mtr);
}
-
- btr_page_set_index_id(page, page_zip, index->id, mtr);
}
/**************************************************************//**
@@ -471,15 +473,13 @@ btr_page_alloc_for_ibuf(
dict_index_t* index, /*!< in: index tree */
mtr_t* mtr) /*!< in: mtr */
{
- fil_addr_t node_addr;
- page_t* root;
- page_t* new_page;
buf_block_t* new_block;
- root = btr_root_get(index, mtr);
+ buf_block_t* root = btr_root_block_get(index, RW_SX_LATCH, mtr);
- node_addr = flst_get_first(PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST
- + root);
+ fil_addr_t node_addr = flst_get_first(PAGE_HEADER
+ + PAGE_BTR_IBUF_FREE_LIST
+ + root->frame);
ut_a(node_addr.page != FIL_NULL);
new_block = buf_page_get(
@@ -487,14 +487,12 @@ btr_page_alloc_for_ibuf(
index->table->space->zip_size(),
RW_X_LATCH, mtr);
- new_page = buf_block_get_frame(new_block);
buf_block_dbg_add_level(new_block, SYNC_IBUF_TREE_NODE_NEW);
- flst_remove(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
- new_page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE,
+ flst_remove(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
+ new_block, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE,
mtr);
- ut_ad(flst_validate(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
- mtr));
+ ut_d(flst_validate(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr));
return(new_block);
}
@@ -525,16 +523,12 @@ btr_page_alloc_low(
is already X-latched in mtr, do
not initialize the page. */
{
- fseg_header_t* seg_header;
- page_t* root;
-
- root = btr_root_get(index, mtr);
+ page_t* root = btr_root_get(index, mtr);
- if (level == 0) {
- seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF;
- } else {
- seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_TOP;
- }
+ fseg_header_t* seg_header = (level
+ ? PAGE_HEADER + PAGE_BTR_SEG_TOP
+ : PAGE_HEADER + PAGE_BTR_SEG_LEAF)
+ + root;
/* Parameter TRUE below states that the caller has made the
reservation for free extents, and thus we know that a page can
@@ -716,17 +710,14 @@ btr_page_free_for_ibuf(
buf_block_t* block, /*!< in: block to be freed, x-latched */
mtr_t* mtr) /*!< in: mtr */
{
- page_t* root;
-
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
- root = btr_root_get(index, mtr);
- flst_add_first(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
- buf_block_get_frame(block)
- + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, mtr);
+ buf_block_t* root = btr_root_block_get(index, RW_SX_LATCH, mtr);
+
+ flst_add_first(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
+ block, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, mtr);
- ut_ad(flst_validate(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
- mtr));
+ ut_d(flst_validate(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr));
}
/** Free an index page.
@@ -779,39 +770,29 @@ void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
srv_immediate_scrub_data_uncompressed is set. */
}
-/**************************************************************//**
+/** Set the child page number in a node pointer record.
+@param[in,out] block non-leaf index page
+@param[in,out] rec node pointer record in the page
+@param[in] offsets rec_get_offsets(rec)
+@param[in] page_no child page number
+@param[in,out] mtr mini-transaction
Sets the child node file address in a node pointer. */
-UNIV_INLINE
-void
-btr_node_ptr_set_child_page_no(
-/*===========================*/
- rec_t* rec, /*!< in: node pointer record */
- page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
- part will be updated, or NULL */
- const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
- ulint page_no,/*!< in: child node address */
- mtr_t* mtr) /*!< in: mtr */
+inline void btr_node_ptr_set_child_page_no(buf_block_t *block,
+ rec_t *rec, const ulint *offsets,
+ ulint page_no, mtr_t *mtr)
{
- byte* field;
- ulint len;
-
- ut_ad(rec_offs_validate(rec, NULL, offsets));
- ut_ad(!page_rec_is_leaf(rec));
- ut_ad(!rec_offs_comp(offsets) || rec_get_node_ptr_flag(rec));
-
- /* The child address is in the last field */
- field = rec_get_nth_field(rec, offsets,
- rec_offs_n_fields(offsets) - 1, &len);
-
- ut_ad(len == REC_NODE_PTR_SIZE);
-
- if (page_zip) {
- page_zip_write_node_ptr(page_zip, rec,
- rec_offs_data_size(offsets),
- page_no, mtr);
- } else {
- mlog_write_ulint(field, page_no, MLOG_4BYTES, mtr);
- }
+ ut_ad(rec_offs_validate(rec, NULL, offsets));
+ ut_ad(!page_rec_is_leaf(rec));
+ ut_ad(!rec_offs_comp(offsets) || rec_get_node_ptr_flag(rec));
+
+ const ulint offs= rec_offs_data_size(offsets);
+ ut_ad(rec_offs_nth_size(offsets, rec_offs_n_fields(offsets) - 1) ==
+ REC_NODE_PTR_SIZE);
+
+ if (UNIV_LIKELY_NULL(block->page.zip.data))
+ page_zip_write_node_ptr(&block->page.zip, rec, offs, page_no, mtr);
+ else
+ mtr->write<4>(*block, rec + offs - REC_NODE_PTR_SIZE, page_no);
}
/************************************************************//**
@@ -986,7 +967,7 @@ void btr_page_get_father(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
}
/** PAGE_INDEX_ID value for freed index B-trees */
-static const index_id_t BTR_FREED_INDEX_ID = 0;
+constexpr index_id_t BTR_FREED_INDEX_ID = 0;
/** Free a B-tree root page. btr_free_but_not_root() must already
have been called.
@@ -995,30 +976,33 @@ before mtr.commit().
@param[in,out] block index root page
@param[in,out] mtr mini-transaction
@param[in] invalidate whether to invalidate PAGE_INDEX_ID */
-static void btr_free_root(buf_block_t* block, mtr_t* mtr, bool invalidate)
+static void btr_free_root(buf_block_t *block, mtr_t *mtr, bool invalidate)
{
- fseg_header_t* header;
+ ut_ad(mtr_memo_contains_flagged(mtr, block,
+ MTR_MEMO_PAGE_X_FIX | MTR_MEMO_PAGE_SX_FIX));
+ ut_ad(mtr->is_named_space(block->page.id.space()));
- ut_ad(mtr_memo_contains_flagged(mtr, block, MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- ut_ad(mtr->is_named_space(block->page.id.space()));
-
- btr_search_drop_page_hash_index(block);
+ btr_search_drop_page_hash_index(block);
- header = buf_block_get_frame(block) + PAGE_HEADER + PAGE_BTR_SEG_TOP;
#ifdef UNIV_BTR_DEBUG
- ut_a(btr_root_fseg_validate(header, block->page.id.space()));
+ ut_a(btr_root_fseg_validate(PAGE_HEADER + PAGE_BTR_SEG_TOP + block->frame,
+ block->page.id.space()));
#endif /* UNIV_BTR_DEBUG */
- if (invalidate) {
- btr_page_set_index_id(
- buf_block_get_frame(block),
- buf_block_get_page_zip(block),
- BTR_FREED_INDEX_ID, mtr);
- }
-
- while (!fseg_free_step(header, true, mtr)) {
- /* Free the entire segment in small steps. */
- }
+ if (invalidate)
+ {
+ byte *page_index_id= PAGE_HEADER + PAGE_INDEX_ID + block->frame;
+ if (UNIV_LIKELY_NULL(block->page.zip.data))
+ {
+ mach_write_to_8(page_index_id, BTR_FREED_INDEX_ID);
+ page_zip_write_header(&block->page.zip, page_index_id, 8, mtr);
+ }
+ else
+ mtr->write<8,mtr_t::OPT>(*block, page_index_id, BTR_FREED_INDEX_ID);
+ }
+
+ /* Free the entire segment in small steps. */
+ while (!fseg_free_step(PAGE_HEADER + PAGE_BTR_SEG_TOP + block->frame,
+ true, mtr));
}
/** Prepare to free a B-tree.
@@ -1076,8 +1060,6 @@ btr_create(
mtr_t* mtr)
{
buf_block_t* block;
- page_t* page;
- page_zip_des_t* page_zip;
ut_ad(mtr->is_named_space(space));
ut_ad(index_id != BTR_FREED_INDEX_ID);
@@ -1144,30 +1126,29 @@ btr_create(
buf_block_dbg_add_level(block, SYNC_TREE_NODE_NEW);
}
- /* Create a new index page on the allocated segment page */
- page_zip = buf_block_get_page_zip(block);
+ byte* page_index_id = PAGE_HEADER + PAGE_INDEX_ID + block->frame;
- if (page_zip) {
- page = page_create_zip(block, index, 0, 0, mtr);
+ /* Create a new index page on the allocated segment page */
+ if (UNIV_LIKELY_NULL(block->page.zip.data)) {
+ page_create_zip(block, index, 0, 0, mtr);
+ mach_write_to_8(page_index_id, index_id);
+ page_zip_write_header(&block->page.zip, page_index_id, 8, mtr);
+ static_assert(FIL_PAGE_PREV % 8 == 0, "alignment");
+ memset_aligned<8>(FIL_PAGE_PREV + block->page.zip.data,
+ 0xff, 8);
} else {
- page = page_create(block, mtr,
- dict_table_is_comp(index->table),
- dict_index_is_spatial(index));
+ page_create(block, mtr, index->table->not_redundant(),
+ index->is_spatial());
/* Set the level of the new index page */
- btr_page_set_level(page, NULL, 0, mtr);
+ mtr->write<2,mtr_t::OPT>(*block, PAGE_HEADER + PAGE_LEVEL
+ + block->frame, 0U);
+ mtr->write<8,mtr_t::OPT>(*block, page_index_id, index_id);
}
- /* Set the index id of the page */
- btr_page_set_index_id(page, page_zip, index_id, mtr);
-
/* Set the next node and previous node fields */
compile_time_assert(FIL_PAGE_NEXT == FIL_PAGE_PREV + 4);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(block, FIL_PAGE_PREV, 8, 0xff, mtr);
- if (UNIV_LIKELY_NULL(page_zip)) {
- static_assert(FIL_PAGE_PREV % 8 == 0, "alignment");
- memset_aligned<8>(FIL_PAGE_PREV + page_zip->data, 0xff, 8);
- }
/* We reset the free bits for the page in a separate
mini-transaction to allow creation of several trees in the
@@ -1184,7 +1165,8 @@ btr_create(
allowed size fit on the root page: this fact is needed to ensure
correctness of split algorithms */
- ut_ad(page_get_max_insert_size(page, 2) > 2 * BTR_PAGE_MAX_REC_SIZE);
+ ut_ad(page_get_max_insert_size(block->frame, 2)
+ > 2 * BTR_PAGE_MAX_REC_SIZE);
return(block->page.id.page_no());
}
@@ -1199,7 +1181,6 @@ btr_free_but_not_root(
buf_block_t* block,
mtr_log_t log_mode)
{
- ibool finished;
mtr_t mtr;
ut_ad(fil_page_index_page_check(block->frame));
@@ -1226,8 +1207,8 @@ leaf_loop:
/* NOTE: page hash indexes are dropped when a page is freed inside
fsp0fsp. */
- finished = fseg_free_step(root + PAGE_HEADER + PAGE_BTR_SEG_LEAF,
- true, &mtr);
+ bool finished = fseg_free_step(root + PAGE_HEADER + PAGE_BTR_SEG_LEAF,
+ true, &mtr);
mtr_commit(&mtr);
if (!finished) {
@@ -1390,7 +1371,7 @@ btr_write_autoinc(dict_index_t* index, ib_uint64_t autoinc, bool reset)
page_set_autoinc(buf_page_get(page_id_t(space->id, index->page),
space->zip_size(),
RW_SX_LATCH, &mtr),
- index, autoinc, &mtr, reset);
+ autoinc, &mtr, reset);
mtr.commit();
}
@@ -1424,7 +1405,6 @@ btr_page_reorganize_low(
page_t* page = buf_block_get_frame(block);
page_zip_des_t* page_zip = buf_block_get_page_zip(block);
buf_block_t* temp_block;
- page_t* temp_page;
ulint data_size1;
ulint data_size2;
ulint max_ins_size1;
@@ -1451,7 +1431,6 @@ btr_page_reorganize_low(
mtr_log_t log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE);
temp_block = buf_block_alloc(buf_pool);
- temp_page = temp_block->frame;
MONITOR_INC(MONITOR_INDEX_REORG_ATTEMPTS);
@@ -1480,12 +1459,14 @@ btr_page_reorganize_low(
do not copy the lock bits yet */
page_copy_rec_list_end_no_locks(block, temp_block,
- page_get_infimum_rec(temp_page),
+ page_get_infimum_rec(temp_block->frame),
index, mtr);
/* Copy the PAGE_MAX_TRX_ID or PAGE_ROOT_AUTO_INC. */
- memcpy(page + (PAGE_HEADER + PAGE_MAX_TRX_ID),
- temp_page + (PAGE_HEADER + PAGE_MAX_TRX_ID), 8);
+ static_assert((PAGE_HEADER + PAGE_MAX_TRX_ID) % 8 == 0, "alignment");
+ memcpy_aligned<8>(&block->frame[PAGE_HEADER + PAGE_MAX_TRX_ID],
+ &temp_block->frame[PAGE_HEADER + PAGE_MAX_TRX_ID],
+ 8);
/* PAGE_MAX_TRX_ID is unused in clustered index pages
(other than the root where it is repurposed as PAGE_ROOT_AUTO_INC),
non-leaf pages, and in temporary tables. It was always
@@ -1496,15 +1477,15 @@ btr_page_reorganize_low(
During redo log apply, dict_index_is_sec_or_ibuf() always
holds, even for clustered indexes. */
ut_ad(recovery || index->table->is_temporary()
- || !page_is_leaf(temp_page)
+ || !page_is_leaf(temp_block->frame)
|| !dict_index_is_sec_or_ibuf(index)
- || page_get_max_trx_id(page) != 0);
+ || page_get_max_trx_id(block->frame) != 0);
/* PAGE_MAX_TRX_ID must be zero on non-leaf pages other than
clustered index root pages. */
ut_ad(recovery
- || page_get_max_trx_id(page) == 0
+ || page_get_max_trx_id(block->frame) == 0
|| (dict_index_is_sec_or_ibuf(index)
- ? page_is_leaf(temp_page)
+ ? page_is_leaf(temp_block->frame)
: block->page.id.page_no() == index->page));
/* If innodb_log_compressed_pages is ON, page reorganize should log the
@@ -1521,29 +1502,32 @@ btr_page_reorganize_low(
/* Restore the old page and exit. */
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
/* Check that the bytes that we skip are identical. */
- ut_a(!memcmp(page, temp_page, PAGE_HEADER));
- ut_a(!memcmp(PAGE_HEADER + PAGE_N_RECS + page,
- PAGE_HEADER + PAGE_N_RECS + temp_page,
+ ut_a(!memcmp(page, temp_block->frame, PAGE_HEADER));
+ ut_a(!memcmp(PAGE_HEADER + PAGE_N_RECS + block->frame,
+ PAGE_HEADER + PAGE_N_RECS + temp_block->frame,
PAGE_DATA - (PAGE_HEADER + PAGE_N_RECS)));
- ut_a(!memcmp(srv_page_size - FIL_PAGE_DATA_END + page,
- srv_page_size - FIL_PAGE_DATA_END + temp_page,
- FIL_PAGE_DATA_END));
+ ut_a(!memcmp(srv_page_size - FIL_PAGE_DATA_END
+ + block->frame,
+ srv_page_size - FIL_PAGE_DATA_END
+ + temp_block->frame, FIL_PAGE_DATA_END));
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
- memcpy(PAGE_HEADER + page, PAGE_HEADER + temp_page,
- PAGE_N_RECS - PAGE_N_DIR_SLOTS);
- memcpy(PAGE_DATA + page, PAGE_DATA + temp_page,
+ memcpy_aligned<2>(PAGE_HEADER + block->frame,
+ PAGE_HEADER + temp_block->frame,
+ PAGE_N_RECS - PAGE_N_DIR_SLOTS);
+ memcpy(PAGE_DATA + block->frame, PAGE_DATA + temp_block->frame,
srv_page_size - PAGE_DATA - FIL_PAGE_DATA_END);
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
- ut_a(!memcmp(page, temp_page, srv_page_size));
+ ut_a(!memcmp(block->frame, temp_block->frame, srv_page_size));
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
goto func_exit;
}
- data_size2 = page_get_data_size(page);
- max_ins_size2 = page_get_max_insert_size_after_reorganize(page, 1);
+ data_size2 = page_get_data_size(block->frame);
+ max_ins_size2 = page_get_max_insert_size_after_reorganize(block->frame,
+ 1);
if (data_size1 != data_size2 || max_ins_size1 != max_ins_size2) {
ib::error()
@@ -1560,39 +1544,41 @@ btr_page_reorganize_low(
/* Restore the cursor position. */
if (pos > 0) {
- cursor->rec = page_rec_get_nth(page, pos);
+ cursor->rec = page_rec_get_nth(block->frame, pos);
} else {
- ut_ad(cursor->rec == page_get_infimum_rec(page));
+ ut_ad(cursor->rec == page_get_infimum_rec(block->frame));
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page, index));
+ ut_a(!page_zip || page_zip_validate(page_zip, block->frame, index));
#endif /* UNIV_ZIP_DEBUG */
if (!recovery) {
if (block->page.id.page_no() == index->page
- && fil_page_get_type(temp_page) == FIL_PAGE_TYPE_INSTANT) {
+ && fil_page_get_type(temp_block->frame)
+ == FIL_PAGE_TYPE_INSTANT) {
/* Preserve the PAGE_INSTANT information. */
ut_ad(!page_zip);
ut_ad(index->is_instant());
static_assert(!(FIL_PAGE_TYPE % 2), "alignment");
- memcpy_aligned<2>(FIL_PAGE_TYPE + page,
- FIL_PAGE_TYPE + temp_page, 2);
+ memcpy_aligned<2>(FIL_PAGE_TYPE + block->frame,
+ FIL_PAGE_TYPE + temp_block->frame, 2);
static_assert(!((PAGE_HEADER+PAGE_INSTANT) % 2), "");
- memcpy_aligned<2>(PAGE_HEADER + PAGE_INSTANT + page,
+ memcpy_aligned<2>(PAGE_HEADER + PAGE_INSTANT
+ + block->frame,
PAGE_HEADER + PAGE_INSTANT
- + temp_page, 2);
+ + temp_block->frame, 2);
if (!index->table->instant) {
- } else if (page_is_comp(page)) {
- memcpy(PAGE_NEW_INFIMUM + page,
- PAGE_NEW_INFIMUM + temp_page, 8);
- memcpy(PAGE_NEW_SUPREMUM + page,
- PAGE_NEW_SUPREMUM + temp_page, 8);
+ } else if (page_is_comp(block->frame)) {
+ memcpy(PAGE_NEW_INFIMUM + block->frame,
+ PAGE_NEW_INFIMUM + temp_block->frame, 8);
+ memcpy(PAGE_NEW_SUPREMUM + block->frame,
+ PAGE_NEW_SUPREMUM + temp_block->frame, 8);
} else {
- memcpy(PAGE_OLD_INFIMUM + page,
- PAGE_OLD_INFIMUM + temp_page, 8);
- memcpy(PAGE_OLD_SUPREMUM + page,
- PAGE_OLD_SUPREMUM + temp_page, 8);
+ memcpy(PAGE_OLD_INFIMUM + block->frame,
+ PAGE_OLD_INFIMUM + temp_block->frame, 8);
+ memcpy(PAGE_OLD_SUPREMUM + block->frame,
+ PAGE_OLD_SUPREMUM + temp_block->frame, 8);
}
}
@@ -1637,24 +1623,29 @@ func_exit:
MONITOR_INC(MONITOR_INDEX_REORG_SUCCESSFUL);
}
- if (UNIV_UNLIKELY(fil_page_get_type(page) == FIL_PAGE_TYPE_INSTANT)) {
+ if (UNIV_UNLIKELY(fil_page_get_type(block->frame)
+ == FIL_PAGE_TYPE_INSTANT)) {
/* Log the PAGE_INSTANT information. */
ut_ad(!page_zip);
ut_ad(index->is_instant());
ut_ad(!recovery);
- mlog_write_ulint(FIL_PAGE_TYPE + page, FIL_PAGE_TYPE_INSTANT,
- MLOG_2BYTES, mtr);
- mlog_write_ulint(PAGE_HEADER + PAGE_INSTANT + page,
- mach_read_from_2(PAGE_HEADER + PAGE_INSTANT
- + page),
- MLOG_2BYTES, mtr);
+ mtr->write<2,mtr_t::FORCED>(*block, FIL_PAGE_TYPE
+ + block->frame,
+ FIL_PAGE_TYPE_INSTANT);
+ byte* instant = PAGE_HEADER + PAGE_INSTANT + block->frame;
+ mtr->write<2,mtr_t::FORCED>(*block, instant,
+ mach_read_from_2(instant));
if (!index->table->instant) {
- } else if (page_is_comp(page)) {
- mlog_log_string(PAGE_NEW_INFIMUM + page, 8, mtr);
- mlog_log_string(PAGE_NEW_SUPREMUM + page, 8, mtr);
+ } else if (page_is_comp(block->frame)) {
+ mlog_log_string(PAGE_NEW_INFIMUM + block->frame, 8,
+ mtr);
+ mlog_log_string(PAGE_NEW_SUPREMUM + block->frame, 8,
+ mtr);
} else {
- mlog_log_string(PAGE_OLD_INFIMUM + page, 8, mtr);
- mlog_log_string(PAGE_OLD_SUPREMUM + page, 8, mtr);
+ mlog_log_string(PAGE_OLD_INFIMUM + block->frame, 8,
+ mtr);
+ mlog_log_string(PAGE_OLD_SUPREMUM + block->frame, 8,
+ mtr);
}
}
@@ -1770,14 +1761,12 @@ btr_page_empty(
ulint level,
mtr_t* mtr)
{
- page_t* page = buf_block_get_frame(block);
-
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(page_zip == buf_block_get_page_zip(block));
ut_ad(!index->is_dummy);
ut_ad(index->table->space->id == block->page.id.space());
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page, index));
+ ut_a(!page_zip || page_zip_validate(page_zip, block->frame, index));
#endif /* UNIV_ZIP_DEBUG */
btr_search_drop_page_hash_index(block);
@@ -1790,7 +1779,7 @@ btr_page_empty(
const ib_uint64_t autoinc
= dict_index_is_clust(index)
&& index->page == block->page.id.page_no()
- ? page_get_autoinc(page)
+ ? page_get_autoinc(block->frame)
: 0;
if (page_zip) {
@@ -1798,10 +1787,11 @@ btr_page_empty(
} else {
page_create(block, mtr, dict_table_is_comp(index->table),
dict_index_is_spatial(index));
- btr_page_set_level(page, NULL, level, mtr);
+ mtr->write<2,mtr_t::OPT>(*block, PAGE_HEADER + PAGE_LEVEL
+ + block->frame, level);
if (autoinc) {
- mlog_write_ull(PAGE_HEADER + PAGE_MAX_TRX_ID + page,
- autoinc, mtr);
+ mtr->write<8>(*block, PAGE_HEADER + PAGE_MAX_TRX_ID
+ + block->frame, autoinc);
}
}
}
@@ -1848,20 +1838,19 @@ void btr_set_instant(buf_block_t* root, const dict_index_t& index, mtr_t* mtr)
|| !page_get_instant(root->frame));
ut_ad(!memcmp(infimum, "infimum", 8));
ut_ad(!memcmp(supremum, "supremum", 8));
- mlog_write_ulint(page_type, FIL_PAGE_TYPE_INSTANT,
- MLOG_2BYTES, mtr);
+ mtr->write<2>(*root, page_type, FIL_PAGE_TYPE_INSTANT);
ut_ad(i <= PAGE_NO_DIRECTION);
i |= index.n_core_fields << 3;
- mlog_write_ulint(PAGE_HEADER + PAGE_INSTANT + root->frame, i,
- MLOG_2BYTES, mtr);
+ mtr->write<2>(*root, PAGE_HEADER + PAGE_INSTANT + root->frame,
+ i);
break;
}
if (index.table->instant) {
mlog_memset(root, infimum - root->frame, 8, 0, mtr);
mlog_memset(root, supremum - root->frame, 7, 0, mtr);
- mlog_write_ulint(&supremum[7], index.n_core_null_bytes,
- MLOG_1BYTE, mtr);
+ mtr->write<1,mtr_t::OPT>(*root, &supremum[7],
+ index.n_core_null_bytes);
}
}
@@ -1887,8 +1876,6 @@ btr_root_raise_and_insert(
mtr_t* mtr) /*!< in: mtr */
{
dict_index_t* index;
- page_t* root;
- page_t* new_page;
ulint new_page_no;
rec_t* rec;
dtuple_t* node_ptr;
@@ -1897,40 +1884,40 @@ btr_root_raise_and_insert(
page_cur_t* page_cursor;
page_zip_des_t* root_page_zip;
page_zip_des_t* new_page_zip;
- buf_block_t* root_block;
+ buf_block_t* root;
buf_block_t* new_block;
- root = btr_cur_get_page(cursor);
- root_block = btr_cur_get_block(cursor);
- root_page_zip = buf_block_get_page_zip(root_block);
- ut_ad(!page_is_empty(root));
+ root = btr_cur_get_block(cursor);
+ root_page_zip = buf_block_get_page_zip(root);
+ ut_ad(!page_is_empty(root->frame));
index = btr_cur_get_index(cursor);
ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable));
#ifdef UNIV_ZIP_DEBUG
- ut_a(!root_page_zip || page_zip_validate(root_page_zip, root, index));
+ ut_a(!root_page_zip || page_zip_validate(root_page_zip, root->frame,
+ index));
#endif /* UNIV_ZIP_DEBUG */
#ifdef UNIV_BTR_DEBUG
if (!dict_index_is_ibuf(index)) {
ulint space = index->table->space_id;
ut_a(btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF
- + root, space));
+ + root->frame, space));
ut_a(btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP
- + root, space));
+ + root->frame, space));
}
- ut_a(dict_index_get_page(index) == page_get_page_no(root));
+ ut_a(dict_index_get_page(index) == page_get_page_no(root->frame));
#endif /* UNIV_BTR_DEBUG */
ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
- ut_ad(mtr_memo_contains(mtr, root_block, MTR_MEMO_PAGE_X_FIX));
+ ut_ad(mtr_memo_contains(mtr, root, MTR_MEMO_PAGE_X_FIX));
/* Allocate a new page to the tree. Root splitting is done by first
moving the root records to the new page, emptying the root, putting
a node pointer to the new page, and then splitting the new page. */
- level = btr_page_get_level(root);
+ level = btr_page_get_level(root->frame);
new_block = btr_page_alloc(index, 0, FSP_NO_DIR, level, mtr, mtr);
@@ -1938,7 +1925,6 @@ btr_root_raise_and_insert(
return(NULL);
}
- new_page = buf_block_get_frame(new_block);
new_page_zip = buf_block_get_page_zip(new_block);
ut_a(!new_page_zip == !root_page_zip);
ut_a(!new_page_zip
@@ -1948,12 +1934,17 @@ btr_root_raise_and_insert(
btr_page_create(new_block, new_page_zip, index, level, mtr);
/* Set the next node and previous node fields of new page */
- compile_time_assert(FIL_PAGE_NEXT == FIL_PAGE_PREV + 4);
- compile_time_assert(FIL_NULL == 0xffffffff);
- mlog_memset(new_block, FIL_PAGE_PREV, 8, 0xff, mtr);
- if (UNIV_LIKELY_NULL(new_page_zip)) {
- static_assert(FIL_PAGE_PREV % 8 == 0, "alignment");
- memset_aligned<8>(new_page_zip->data + FIL_PAGE_PREV, 0xff, 8);
+ if (!page_has_siblings(new_block->frame)) {
+ ut_ad(index->is_ibuf());
+ } else {
+ compile_time_assert(FIL_PAGE_NEXT == FIL_PAGE_PREV + 4);
+ compile_time_assert(FIL_NULL == 0xffffffff);
+ mlog_memset(new_block, FIL_PAGE_PREV, 8, 0xff, mtr);
+ if (UNIV_LIKELY_NULL(new_page_zip)) {
+ static_assert(FIL_PAGE_PREV % 8 == 0, "alignment");
+ memset_aligned<8>(new_page_zip->data + FIL_PAGE_PREV,
+ 0xff, 8);
+ }
}
/* Copy the records from root to the new page one by one. */
@@ -1962,25 +1953,25 @@ btr_root_raise_and_insert(
#ifdef UNIV_ZIP_COPY
|| new_page_zip
#endif /* UNIV_ZIP_COPY */
- || !page_copy_rec_list_end(new_block, root_block,
- page_get_infimum_rec(root),
+ || !page_copy_rec_list_end(new_block, root,
+ page_get_infimum_rec(root->frame),
index, mtr)) {
ut_a(new_page_zip);
/* Copy the page byte for byte. */
page_zip_copy_recs(new_block,
- root_page_zip, root, index, mtr);
+ root_page_zip, root->frame, index, mtr);
/* Update the lock table and possible hash index. */
- lock_move_rec_list_end(new_block, root_block,
- page_get_infimum_rec(root));
+ lock_move_rec_list_end(new_block, root,
+ page_get_infimum_rec(root->frame));
/* Move any existing predicate locks */
if (dict_index_is_spatial(index)) {
- lock_prdt_rec_move(new_block, root_block);
+ lock_prdt_rec_move(new_block, root);
} else {
btr_search_move_or_delete_hash_entries(
- new_block, root_block);
+ new_block, root);
}
}
@@ -1990,25 +1981,29 @@ btr_root_raise_and_insert(
the field only matters on leaf pages, and the root no
longer is a leaf page. (Older versions of InnoDB did
set PAGE_MAX_TRX_ID on all secondary index pages.) */
- if (root_page_zip) {
- byte* p = PAGE_HEADER + PAGE_MAX_TRX_ID + root;
- memset(p, 0, 8);
+ byte* p = static_cast<byte*>(
+ MY_ASSUME_ALIGNED(PAGE_HEADER + PAGE_MAX_TRX_ID
+ + root->frame, 8));
+ if (UNIV_LIKELY_NULL(root_page_zip)) {
+ memset_aligned<8>(p, 0, 8);
page_zip_write_header(root_page_zip, p, 8, mtr);
- } else {
- mlog_write_ull(PAGE_HEADER + PAGE_MAX_TRX_ID
- + root, 0, mtr);
+ } else if (mach_read_from_8(p)) {
+ mlog_memset(root, PAGE_HEADER + PAGE_MAX_TRX_ID, 8, 0,
+ mtr);
}
} else {
/* PAGE_ROOT_AUTO_INC is only present in the clustered index
root page; on other clustered index pages, we want to reserve
the field PAGE_MAX_TRX_ID for future use. */
- if (new_page_zip) {
- byte* p = PAGE_HEADER + PAGE_MAX_TRX_ID + new_page;
- memset(p, 0, 8);
+ byte* p = static_cast<byte*>(
+ MY_ASSUME_ALIGNED(PAGE_HEADER + PAGE_MAX_TRX_ID
+ + new_block->frame, 8));
+ if (UNIV_LIKELY_NULL(new_page_zip)) {
+ memset_aligned<8>(p, 0, 8);
page_zip_write_header(new_page_zip, p, 8, mtr);
- } else {
- mlog_write_ull(PAGE_HEADER + PAGE_MAX_TRX_ID
- + new_page, 0, mtr);
+ } else if (mach_read_from_8(p)) {
+ mlog_memset(new_block, PAGE_HEADER + PAGE_MAX_TRX_ID,
+ 8, 0, mtr);
}
}
@@ -2018,7 +2013,7 @@ btr_root_raise_and_insert(
root page: we cannot discard the lock structs on the root page */
if (!dict_table_is_locking_disabled(index->table)) {
- lock_update_root_raise(new_block, root_block);
+ lock_update_root_raise(new_block, root);
}
/* Create a memory heap where the node pointer is stored */
@@ -2026,7 +2021,7 @@ btr_root_raise_and_insert(
*heap = mem_heap_create(1000);
}
- rec = page_rec_get_next(page_get_infimum_rec(new_page));
+ rec = page_rec_get_next(page_get_infimum_rec(new_block->frame));
new_page_no = new_block->page.id.page_no();
/* Build the node pointer (= node key and page address) for the
@@ -2049,28 +2044,28 @@ btr_root_raise_and_insert(
| REC_INFO_MIN_REC_FLAG);
/* Rebuild the root page to get free space */
- btr_page_empty(root_block, root_page_zip, index, level + 1, mtr);
+ btr_page_empty(root, root_page_zip, index, level + 1, mtr);
/* btr_page_empty() is supposed to zero-initialize the field. */
- ut_ad(!page_get_instant(root_block->frame));
+ ut_ad(!page_get_instant(root->frame));
if (index->is_instant()) {
ut_ad(!root_page_zip);
- btr_set_instant(root_block, *index, mtr);
+ btr_set_instant(root, *index, mtr);
}
- ut_ad(!page_has_siblings(root));
+ ut_ad(!page_has_siblings(root->frame));
page_cursor = btr_cur_get_page_cur(cursor);
/* Insert node pointer to the root */
- page_cur_set_before_first(root_block, page_cursor);
+ page_cur_set_before_first(root, page_cursor);
node_ptr_rec = page_cur_tuple_insert(page_cursor, node_ptr,
index, offsets, heap, 0, mtr);
/* The root page should only contain the node pointer
- to new_page at this point. Thus, the data should fit. */
+ to new_block at this point. Thus, the data should fit. */
ut_a(node_ptr_rec);
/* We play safe and reset the free bits for the new page */
@@ -2504,19 +2499,12 @@ btr_attach_half_pages(
ulint direction, /*!< in: FSP_UP or FSP_DOWN */
mtr_t* mtr) /*!< in: mtr */
{
- ulint prev_page_no;
- ulint next_page_no;
- page_t* page = buf_block_get_frame(block);
- page_t* lower_page;
- page_t* upper_page;
- ulint lower_page_no;
- ulint upper_page_no;
- page_zip_des_t* lower_page_zip;
- page_zip_des_t* upper_page_zip;
dtuple_t* node_ptr_upper;
mem_heap_t* heap;
buf_block_t* prev_block = NULL;
buf_block_t* next_block = NULL;
+ buf_block_t* lower_block;
+ buf_block_t* upper_block;
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr_memo_contains(mtr, new_block, MTR_MEMO_PAGE_X_FIX));
@@ -2530,12 +2518,8 @@ btr_attach_half_pages(
btr_cur_t cursor;
ulint* offsets;
- lower_page = buf_block_get_frame(new_block);
- lower_page_no = new_block->page.id.page_no();
- lower_page_zip = buf_block_get_page_zip(new_block);
- upper_page = buf_block_get_frame(block);
- upper_page_no = block->page.id.page_no();
- upper_page_zip = buf_block_get_page_zip(block);
+ lower_block = new_block;
+ upper_block = block;
/* Look up the index for the node pointer to page */
offsets = btr_page_get_father_block(NULL, heap, index,
@@ -2545,17 +2529,13 @@ btr_attach_half_pages(
address of the new lower half */
btr_node_ptr_set_child_page_no(
+ btr_cur_get_block(&cursor),
btr_cur_get_rec(&cursor),
- btr_cur_get_page_zip(&cursor),
- offsets, lower_page_no, mtr);
+ offsets, lower_block->page.id.page_no(), mtr);
mem_heap_empty(heap);
} else {
- lower_page = buf_block_get_frame(block);
- lower_page_no = block->page.id.page_no();
- lower_page_zip = buf_block_get_page_zip(block);
- upper_page = buf_block_get_frame(new_block);
- upper_page_no = new_block->page.id.page_no();
- upper_page_zip = buf_block_get_page_zip(new_block);
+ lower_block = block;
+ upper_block = new_block;
}
/* Get the level of the split pages */
@@ -2563,8 +2543,8 @@ btr_attach_half_pages(
ut_ad(level == btr_page_get_level(buf_block_get_frame(new_block)));
/* Get the previous and next pages of page */
- prev_page_no = btr_page_get_prev(page);
- next_page_no = btr_page_get_next(page);
+ const uint32_t prev_page_no = btr_page_get_prev(block->frame);
+ const uint32_t next_page_no = btr_page_get_next(block->frame);
/* for consistency, both blocks should be locked, before change */
if (prev_page_no != FIL_NULL && direction == FSP_DOWN) {
@@ -2579,8 +2559,8 @@ btr_attach_half_pages(
/* Build the node pointer (= node key and page address) for the upper
half */
- node_ptr_upper = dict_index_build_node_ptr(index, split_rec,
- upper_page_no, heap, level);
+ node_ptr_upper = dict_index_build_node_ptr(
+ index, split_rec, upper_block->page.id.page_no(), heap, level);
/* Insert it next to the pointer to the lower half. Note that this
may generate recursion leading to a split on the higher level. */
@@ -2595,45 +2575,59 @@ btr_attach_half_pages(
if (prev_block) {
#ifdef UNIV_BTR_DEBUG
- ut_a(page_is_comp(prev_block->frame) == page_is_comp(page));
+ ut_a(page_is_comp(prev_block->frame)
+ == page_is_comp(block->frame));
ut_a(btr_page_get_next(prev_block->frame)
== block->page.id.page_no());
#endif /* UNIV_BTR_DEBUG */
-
- btr_page_set_next(buf_block_get_frame(prev_block),
- buf_block_get_page_zip(prev_block),
- lower_page_no, mtr);
+ btr_page_set_next(prev_block, lower_block->page.id.page_no(),
+ mtr);
}
if (next_block) {
#ifdef UNIV_BTR_DEBUG
- ut_a(page_is_comp(next_block->frame) == page_is_comp(page));
+ ut_a(page_is_comp(next_block->frame)
+ == page_is_comp(block->frame));
ut_a(btr_page_get_prev(next_block->frame)
- == page_get_page_no(page));
+ == block->page.id.page_no());
#endif /* UNIV_BTR_DEBUG */
-
- btr_page_set_prev(buf_block_get_frame(next_block),
- buf_block_get_page_zip(next_block),
- upper_page_no, mtr);
+ btr_page_set_prev(next_block, upper_block->page.id.page_no(),
+ mtr);
}
if (direction == FSP_DOWN) {
- /* lower_page is new */
- btr_page_set_prev(lower_page, lower_page_zip,
- prev_page_no, mtr);
+ ut_ad(lower_block == new_block);
+ ut_ad(btr_page_get_next(upper_block->frame) == next_page_no);
+ if (UNIV_UNLIKELY(btr_page_get_prev(lower_block->frame)
+ == prev_page_no)) {
+ ut_ad(index->is_ibuf());
+ } else {
+ btr_page_set_prev(lower_block, prev_page_no, mtr);
+ }
} else {
- ut_ad(btr_page_get_prev(lower_page) == prev_page_no);
+ ut_ad(upper_block == new_block);
+ ut_ad(btr_page_get_prev(lower_block->frame) == prev_page_no);
+ if (UNIV_UNLIKELY(btr_page_get_next(upper_block->frame)
+ == next_page_no)) {
+ ut_ad(index->is_ibuf());
+ } else {
+ btr_page_set_next(upper_block, next_page_no, mtr);
+ }
}
- btr_page_set_next(lower_page, lower_page_zip, upper_page_no, mtr);
- btr_page_set_prev(upper_page, upper_page_zip, lower_page_no, mtr);
-
- if (direction != FSP_DOWN) {
- /* upper_page is new */
- btr_page_set_next(upper_page, upper_page_zip,
- next_page_no, mtr);
+ if (UNIV_UNLIKELY(btr_page_get_next(lower_block->frame)
+ == upper_block->page.id.page_no())) {
+ ut_ad(index->is_ibuf());
+ } else {
+ btr_page_set_next(lower_block, upper_block->page.id.page_no(),
+ mtr);
+ }
+ if (UNIV_UNLIKELY(btr_page_get_prev(upper_block->frame)
+ == lower_block->page.id.page_no())) {
+ ut_ad(index->is_ibuf());
} else {
- ut_ad(btr_page_get_next(upper_page) == next_page_no);
+ btr_page_set_prev(upper_block, lower_block->page.id.page_no(),
+ mtr);
}
}
@@ -3227,38 +3221,30 @@ void btr_level_list_remove(const buf_block_t& block, const dict_index_t& index,
buf_block_t* prev_block = btr_block_get(
index, prev_page_no, RW_X_LATCH, page_is_leaf(page),
mtr);
- page_t* prev_page
- = buf_block_get_frame(prev_block);
#ifdef UNIV_BTR_DEBUG
- ut_a(page_is_comp(prev_page) == page_is_comp(page));
+ ut_a(page_is_comp(prev_block->frame) == page_is_comp(page));
static_assert(FIL_PAGE_NEXT % 4 == 0, "alignment");
static_assert(FIL_PAGE_OFFSET % 4 == 0, "alignment");
- ut_a(!memcmp_aligned<4>(prev_page + FIL_PAGE_NEXT,
+ ut_a(!memcmp_aligned<4>(prev_block->frame + FIL_PAGE_NEXT,
page + FIL_PAGE_OFFSET, 4));
#endif /* UNIV_BTR_DEBUG */
- btr_page_set_next(prev_page,
- buf_block_get_page_zip(prev_block),
- next_page_no, mtr);
+ btr_page_set_next(prev_block, next_page_no, mtr);
}
if (next_page_no != FIL_NULL) {
buf_block_t* next_block = btr_block_get(
index, next_page_no, RW_X_LATCH, page_is_leaf(page),
mtr);
- page_t* next_page
- = buf_block_get_frame(next_block);
#ifdef UNIV_BTR_DEBUG
- ut_a(page_is_comp(next_page) == page_is_comp(page));
+ ut_a(page_is_comp(next_block->frame) == page_is_comp(page));
static_assert(FIL_PAGE_PREV % 4 == 0, "alignment");
static_assert(FIL_PAGE_OFFSET % 4 == 0, "alignment");
- ut_a(!memcmp_aligned<4>(next_page + FIL_PAGE_PREV,
+ ut_a(!memcmp_aligned<4>(next_block->frame + FIL_PAGE_PREV,
page + FIL_PAGE_OFFSET, 4));
#endif /* UNIV_BTR_DEBUG */
- btr_page_set_prev(next_page,
- buf_block_get_page_zip(next_block),
- prev_page_no, mtr);
+ btr_page_set_prev(next_block, prev_page_no, mtr);
}
}
@@ -3456,15 +3442,8 @@ btr_lift_page_up(
/* Go upward to root page, decrementing levels by one. */
for (i = lift_father_up ? 1 : 0; i < n_blocks; i++, page_level++) {
- page_t* page = buf_block_get_frame(blocks[i]);
- page_zip_des_t* page_zip= buf_block_get_page_zip(blocks[i]);
-
- ut_ad(btr_page_get_level(page) == page_level + 1);
-
- btr_page_set_level(page, page_zip, page_level, mtr);
-#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page, index));
-#endif /* UNIV_ZIP_DEBUG */
+ ut_ad(btr_page_get_level(blocks[i]->frame) == page_level + 1);
+ btr_page_set_level(blocks[i], page_level, mtr);
}
if (dict_index_is_spatial(index)) {
@@ -3838,8 +3817,8 @@ retry:
/* Replace the address of the old child node (= page) with the
address of the merge page to the right */
btr_node_ptr_set_child_page_no(
+ btr_cur_get_block(&father_cursor),
btr_cur_get_rec(&father_cursor),
- btr_cur_get_page_zip(&father_cursor),
offsets, right_page_no, mtr);
#ifdef UNIV_DEBUG
diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc
index c64a64466d8..8be3d6d4b01 100644
--- a/storage/innobase/btr/btr0bulk.cc
+++ b/storage/innobase/btr/btr0bulk.cc
@@ -92,6 +92,8 @@ PageBulk::init()
new_page_zip = buf_block_get_page_zip(new_block);
new_page_no = page_get_page_no(new_page);
+ byte* index_id = PAGE_HEADER + PAGE_INDEX_ID + new_page;
+
if (new_page_zip) {
page_create_zip(new_block, m_index, m_level, 0,
&m_mtr);
@@ -100,11 +102,9 @@ PageBulk::init()
page_zip_write_header(new_page_zip,
FIL_PAGE_PREV + new_page,
8, &m_mtr);
- mach_write_to_8(PAGE_HEADER + PAGE_INDEX_ID + new_page,
- m_index->id);
- page_zip_write_header(new_page_zip,
- PAGE_HEADER + PAGE_INDEX_ID
- + new_page, 8, &m_mtr);
+ mach_write_to_8(index_id, m_index->id);
+ page_zip_write_header(new_page_zip, index_id,
+ 8, &m_mtr);
} else {
ut_ad(!dict_index_is_spatial(m_index));
page_create(new_block, &m_mtr,
@@ -114,10 +114,10 @@ PageBulk::init()
== FIL_PAGE_PREV + 4);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(new_block, FIL_PAGE_PREV, 8, 0xff, &m_mtr);
- mlog_write_ulint(PAGE_HEADER + PAGE_LEVEL + new_page,
- m_level, MLOG_2BYTES, &m_mtr);
- mlog_write_ull(PAGE_HEADER + PAGE_INDEX_ID + new_page,
- m_index->id, &m_mtr);
+ m_mtr.write<2,mtr_t::OPT>(*new_block,
+ PAGE_HEADER + PAGE_LEVEL
+ + new_page, m_level);
+ m_mtr.write<8>(*new_block, index_id, m_index->id);
}
} else {
new_block = btr_block_get(*m_index, m_page_no, RW_X_LATCH,
@@ -130,7 +130,7 @@ PageBulk::init()
ut_ad(page_dir_get_n_heap(new_page) == PAGE_HEAP_NO_USER_LOW);
- btr_page_set_level(new_page, new_page_zip, m_level, &m_mtr);
+ btr_page_set_level(new_block, m_level, &m_mtr);
}
if (!m_level && dict_index_is_sec_or_ibuf(m_index)) {
@@ -169,13 +169,14 @@ PageBulk::init()
}
/** Insert a record in the page.
+@tparam fmt the page format
@param[in] rec record
@param[in] offsets record offsets */
-void
-PageBulk::insert(
- const rec_t* rec,
- ulint* offsets)
+template<PageBulk::format fmt>
+inline void PageBulk::insertPage(const rec_t *rec, ulint *offsets)
{
+ ut_ad((m_page_zip != nullptr) == (fmt == COMPRESSED));
+ ut_ad((fmt != REDUNDANT) == m_is_comp);
ulint rec_size;
ut_ad(m_heap != NULL);
@@ -210,7 +211,7 @@ PageBulk::insert(
/* 3. Set the n_owned field in the inserted record to zero,
and set the heap_no field. */
- if (m_is_comp) {
+ if (fmt != REDUNDANT) {
rec_set_n_owned_new(insert_rec, NULL, 0);
rec_set_heap_no_new(insert_rec,
PAGE_HEAP_NO_USER_LOW + m_rec_no);
@@ -242,15 +243,30 @@ PageBulk::insert(
m_cur_rec = insert_rec;
}
+/** Insert a record in the page.
+@param[in] rec record
+@param[in] offsets record offsets */
+inline void PageBulk::insert(const rec_t *rec, ulint *offsets)
+{
+ if (UNIV_LIKELY_NULL(m_page_zip))
+ insertPage<COMPRESSED>(rec, offsets);
+ else if (m_is_comp)
+ insertPage<DYNAMIC>(rec, offsets);
+ else
+ insertPage<REDUNDANT>(rec, offsets);
+}
+
/** Mark end of insertion to the page. Scan all records to set page dirs,
and set page header members.
-Note: we refer to page_copy_rec_list_end_to_created_page. */
-void
-PageBulk::finish()
+@tparam fmt the page format */
+template<PageBulk::format fmt>
+inline void PageBulk::finishPage()
{
ut_ad(m_rec_no > 0);
+ ut_ad((m_page_zip != nullptr) == (fmt == COMPRESSED));
+ ut_ad((fmt != REDUNDANT) == m_is_comp);
ut_ad(m_total_data + page_dir_calc_reserved_space(m_rec_no)
- <= page_get_free_space_of_empty(m_is_comp));
+ <= page_get_free_space_of_empty(fmt != REDUNDANT));
/* See page_copy_rec_list_end_to_created_page() */
ut_d(page_dir_set_n_slots(m_page, NULL, srv_page_size / 2));
@@ -304,26 +320,26 @@ PageBulk::finish()
ut_ad(!dict_index_is_spatial(m_index));
ut_ad(!page_get_instant(m_page));
-
- if (!m_flush_observer && !m_page_zip) {
- mlog_write_ulint(PAGE_HEADER + PAGE_N_DIR_SLOTS + m_page,
- 2 + slot_index, MLOG_2BYTES, &m_mtr);
- mlog_write_ulint(PAGE_HEADER + PAGE_HEAP_TOP + m_page,
- ulint(m_heap_top - m_page),
- MLOG_2BYTES, &m_mtr);
- mlog_write_ulint(PAGE_HEADER + PAGE_N_HEAP + m_page,
- (PAGE_HEAP_NO_USER_LOW + m_rec_no)
- | ulint(m_is_comp) << 15,
- MLOG_2BYTES, &m_mtr);
- mlog_write_ulint(PAGE_HEADER + PAGE_N_RECS + m_page, m_rec_no,
- MLOG_2BYTES, &m_mtr);
- mlog_write_ulint(PAGE_HEADER + PAGE_LAST_INSERT + m_page,
- ulint(m_cur_rec - m_page),
- MLOG_2BYTES, &m_mtr);
- mlog_write_ulint(PAGE_HEADER + PAGE_DIRECTION_B - 1 + m_page,
- PAGE_RIGHT, MLOG_2BYTES, &m_mtr);
- mlog_write_ulint(PAGE_HEADER + PAGE_N_DIRECTION + m_page, 0,
- MLOG_2BYTES, &m_mtr);
+ ut_ad(!mach_read_from_2(PAGE_HEADER + PAGE_N_DIRECTION + m_page));
+
+ if (fmt != COMPRESSED && !m_flush_observer) {
+ m_mtr.write<2,mtr_t::OPT>(*m_block,
+ PAGE_HEADER + PAGE_N_DIR_SLOTS
+ + m_page, 2 + slot_index);
+ m_mtr.write<2>(*m_block, PAGE_HEADER + PAGE_HEAP_TOP + m_page,
+ ulint(m_heap_top - m_page));
+ m_mtr.write<2>(*m_block,
+ PAGE_HEADER + PAGE_N_HEAP + m_page,
+ (PAGE_HEAP_NO_USER_LOW + m_rec_no)
+ | uint16_t{fmt != REDUNDANT} << 15);
+ m_mtr.write<2>(*m_block,
+ PAGE_HEADER + PAGE_N_RECS + m_page, m_rec_no);
+ m_mtr.write<2>(*m_block,
+ PAGE_HEADER + PAGE_LAST_INSERT + m_page,
+ ulint(m_cur_rec - m_page));
+ m_mtr.write<2>(*m_block,
+ PAGE_HEADER + PAGE_DIRECTION_B - 1 + m_page,
+ PAGE_RIGHT);
} else {
/* For ROW_FORMAT=COMPRESSED, redo log may be written
in PageBulk::compress(). */
@@ -333,18 +349,29 @@ PageBulk::finish()
ulint(m_heap_top - m_page));
mach_write_to_2(PAGE_HEADER + PAGE_N_HEAP + m_page,
(PAGE_HEAP_NO_USER_LOW + m_rec_no)
- | ulint(m_is_comp) << 15);
+ | uint16_t{fmt != REDUNDANT} << 15);
mach_write_to_2(PAGE_HEADER + PAGE_N_RECS + m_page, m_rec_no);
mach_write_to_2(PAGE_HEADER + PAGE_LAST_INSERT + m_page,
ulint(m_cur_rec - m_page));
mach_write_to_2(PAGE_HEADER + PAGE_DIRECTION_B - 1 + m_page,
PAGE_RIGHT);
- mach_write_to_2(PAGE_HEADER + PAGE_N_DIRECTION + m_page, 0);
}
m_block->skip_flush_check = false;
}
+/** Mark end of insertion to the page. Scan all records to set page dirs,
+and set page header members. */
+inline void PageBulk::finish()
+{
+ if (UNIV_LIKELY_NULL(m_page_zip))
+ finishPage<COMPRESSED>();
+ else if (m_is_comp)
+ finishPage<DYNAMIC>();
+ else
+ finishPage<REDUNDANT>();
+}
+
/** Commit inserts done to the page
@param[in] success Flag whether all inserts succeed. */
void
@@ -521,28 +548,24 @@ PageBulk::copyOut(
@param[in] next_page_no next page no */
inline void PageBulk::setNext(ulint next_page_no)
{
- if (UNIV_LIKELY_NULL(m_page_zip)) {
- /* For ROW_FORMAT=COMPRESSED, redo log may be written
- in PageBulk::compress(). */
- mach_write_to_4(m_page + FIL_PAGE_NEXT, next_page_no);
- } else {
- mlog_write_ulint(m_page + FIL_PAGE_NEXT, next_page_no,
- MLOG_4BYTES, &m_mtr);
- }
+ if (UNIV_LIKELY_NULL(m_page_zip))
+ /* For ROW_FORMAT=COMPRESSED, redo log may be written
+ in PageBulk::compress(). */
+ mach_write_to_4(m_page + FIL_PAGE_NEXT, next_page_no);
+ else
+ m_mtr.write<4>(*m_block, m_page + FIL_PAGE_NEXT, next_page_no);
}
/** Set previous page
@param[in] prev_page_no previous page no */
inline void PageBulk::setPrev(ulint prev_page_no)
{
- if (UNIV_LIKELY_NULL(m_page_zip)) {
- /* For ROW_FORMAT=COMPRESSED, redo log may be written
- in PageBulk::compress(). */
- mach_write_to_4(m_page + FIL_PAGE_PREV, prev_page_no);
- } else {
- mlog_write_ulint(m_page + FIL_PAGE_PREV, prev_page_no,
- MLOG_4BYTES, &m_mtr);
- }
+ if (UNIV_LIKELY_NULL(m_page_zip))
+ /* For ROW_FORMAT=COMPRESSED, redo log may be written
+ in PageBulk::compress(). */
+ mach_write_to_4(m_page + FIL_PAGE_PREV, prev_page_no);
+ else
+ m_mtr.write<4>(*m_block, m_page + FIL_PAGE_PREV, prev_page_no);
}
/** Check if required space is available in the page for the rec to be inserted.
@@ -748,9 +771,10 @@ BtrBulk::pageCommit(
page_bulk->setNext(next_page_bulk->getPageNo());
next_page_bulk->setPrev(page_bulk->getPageNo());
} else {
- /** Suppose a page is released and latched again, we need to
+ ut_ad(!page_has_next(page_bulk->getPage()));
+ /* If a page is released and latched again, we need to
mark it modified in mini-transaction. */
- page_bulk->setNext(FIL_NULL);
+ page_bulk->set_modified();
}
ut_ad(!rw_lock_own_flagged(&m_index->lock,
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index f3609b3fa96..c181a5af058 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -154,8 +154,7 @@ static
void
btr_cur_unmark_extern_fields(
/*=========================*/
- page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
- part will be updated, or NULL */
+ buf_block_t* block, /*!< in/out: index page */
rec_t* rec, /*!< in/out: record in a clustered index */
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
@@ -181,8 +180,7 @@ btr_rec_free_updated_extern_fields(
dict_index_t* index, /*!< in: index of rec; the index tree MUST be
X-latched */
rec_t* rec, /*!< in: record */
- page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
- part will be updated, or NULL */
+ buf_block_t* block, /*!< in: index page of rec */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
const upd_t* update, /*!< in: update vector */
bool rollback,/*!< in: performing rollback? */
@@ -198,8 +196,7 @@ btr_rec_free_externally_stored_fields(
tree MUST be X-latched */
rec_t* rec, /*!< in: record */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
- page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
- part will be updated, or NULL */
+ buf_block_t* block, /*!< in: index page of rec */
bool rollback,/*!< in: performing rollback? */
mtr_t* mtr); /*!< in: mini-transaction handle which contains
an X-latch to record page and to the index
@@ -224,7 +221,6 @@ btr_cur_latch_leaves(
uint32_t left_page_no;
uint32_t right_page_no;
buf_block_t* get_block;
- page_t* page = buf_block_get_frame(block);
bool spatial;
btr_latch_leaves_t latch_leaves = {{NULL, NULL, NULL}, {0, 0, 0}};
@@ -252,7 +248,8 @@ btr_cur_latch_leaves(
true, mtr);
latch_leaves.blocks[1] = get_block;
#ifdef UNIV_BTR_DEBUG
- ut_a(page_is_comp(get_block->frame) == page_is_comp(page));
+ ut_a(page_is_comp(get_block->frame)
+ == page_is_comp(block->frame));
#endif /* UNIV_BTR_DEBUG */
if (spatial) {
cursor->rtr_info->tree_blocks[RTR_MAX_LEVELS]
@@ -268,7 +265,7 @@ btr_cur_latch_leaves(
dict_index_get_lock(cursor->index),
MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK));
/* x-latch also siblings from left to right */
- left_page_no = btr_page_get_prev(page);
+ left_page_no = btr_page_get_prev(block->frame);
if (left_page_no != FIL_NULL) {
@@ -304,11 +301,12 @@ btr_cur_latch_leaves(
/* Sanity check only after both the blocks are latched. */
if (latch_leaves.blocks[0] != NULL) {
ut_a(page_is_comp(latch_leaves.blocks[0]->frame)
- == page_is_comp(page));
+ == page_is_comp(block->frame));
ut_a(btr_page_get_next(latch_leaves.blocks[0]->frame)
- == page_get_page_no(page));
+ == block->page.id.page_no());
}
- ut_a(page_is_comp(get_block->frame) == page_is_comp(page));
+ ut_a(page_is_comp(get_block->frame)
+ == page_is_comp(block->frame));
#endif /* UNIV_BTR_DEBUG */
if (spatial) {
@@ -316,7 +314,7 @@ btr_cur_latch_leaves(
= get_block;
}
- right_page_no = btr_page_get_next(page);
+ right_page_no = btr_page_get_next(block->frame);
if (right_page_no != FIL_NULL) {
if (spatial) {
@@ -331,9 +329,9 @@ btr_cur_latch_leaves(
latch_leaves.blocks[2] = get_block;
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame)
- == page_is_comp(page));
+ == page_is_comp(block->frame));
ut_a(btr_page_get_prev(get_block->frame)
- == page_get_page_no(page));
+ == block->page.id.page_no());
#endif /* UNIV_BTR_DEBUG */
if (spatial) {
cursor->rtr_info->tree_blocks[
@@ -348,7 +346,7 @@ btr_cur_latch_leaves(
mode = latch_mode == BTR_SEARCH_PREV ? RW_S_LATCH : RW_X_LATCH;
/* latch also left sibling */
rw_lock_s_lock(&block->lock);
- left_page_no = btr_page_get_prev(page);
+ left_page_no = btr_page_get_prev(block->frame);
rw_lock_s_unlock(&block->lock);
if (left_page_no != FIL_NULL) {
@@ -360,9 +358,9 @@ btr_cur_latch_leaves(
cursor->left_block = get_block;
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame)
- == page_is_comp(page));
+ == page_is_comp(block->frame));
ut_a(btr_page_get_next(get_block->frame)
- == page_get_page_no(page));
+ == block->page.id.page_no());
#endif /* UNIV_BTR_DEBUG */
}
@@ -372,7 +370,8 @@ btr_cur_latch_leaves(
true, mtr);
latch_leaves.blocks[1] = get_block;
#ifdef UNIV_BTR_DEBUG
- ut_a(page_is_comp(get_block->frame) == page_is_comp(page));
+ ut_a(page_is_comp(get_block->frame)
+ == page_is_comp(block->frame));
#endif /* UNIV_BTR_DEBUG */
return(latch_leaves);
case BTR_CONT_MODIFY_TREE:
@@ -2424,8 +2423,7 @@ need_opposite_intention:
cursor->up_bytes = up_bytes;
if (autoinc) {
- page_set_autoinc(tree_blocks[0],
- index, autoinc, mtr, false);
+ page_set_autoinc(tree_blocks[0], autoinc, mtr, false);
}
#ifdef BTR_CUR_HASH_ADAPT
@@ -4160,8 +4158,6 @@ btr_cur_update_in_place(
further pages */
{
dict_index_t* index;
- buf_block_t* block;
- page_zip_des_t* page_zip;
dberr_t err;
rec_t* rec;
roll_ptr_t roll_ptr = 0;
@@ -4190,11 +4186,11 @@ btr_cur_update_in_place(
<< ") by " << ib::hex(trx_id) << ": "
<< rec_printer(rec, offsets).str());
- block = btr_cur_get_block(cursor);
- page_zip = buf_block_get_page_zip(block);
+ buf_block_t* block = btr_cur_get_block(cursor);
+ page_zip_des_t* page_zip = buf_block_get_page_zip(block);
/* Check that enough space is available on the compressed page. */
- if (page_zip) {
+ if (UNIV_LIKELY_NULL(page_zip)) {
ut_ad(!index->table->is_temporary());
if (!btr_cur_update_alloc_zip(
@@ -4277,8 +4273,7 @@ btr_cur_update_in_place(
/* The new updated record owns its possible externally
stored fields */
- btr_cur_unmark_extern_fields(page_zip,
- rec, index, offsets, mtr);
+ btr_cur_unmark_extern_fields(block, rec, index, offsets, mtr);
}
ut_ad(err == DB_SUCCESS);
@@ -4803,7 +4798,6 @@ btr_cur_pessimistic_update(
big_rec_t* dummy_big_rec;
dict_index_t* index;
buf_block_t* block;
- page_t* page;
page_zip_des_t* page_zip;
rec_t* rec;
page_cur_t* page_cursor;
@@ -4813,13 +4807,11 @@ btr_cur_pessimistic_update(
ibool was_first;
ulint n_reserved = 0;
ulint n_ext;
- ulint max_ins_size = 0;
*offsets = NULL;
*big_rec = NULL;
block = btr_cur_get_block(cursor);
- page = buf_block_get_frame(block);
page_zip = buf_block_get_page_zip(block);
index = cursor->index;
@@ -4828,7 +4820,7 @@ btr_cur_pessimistic_update(
MTR_MEMO_SX_LOCK));
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page, index));
+ ut_a(!page_zip || page_zip_validate(page_zip, block->frame, index));
#endif /* UNIV_ZIP_DEBUG */
ut_ad(!page_zip || !index->table->is_temporary());
/* The insert buffer tree should never be updated in place. */
@@ -4861,7 +4853,7 @@ btr_cur_pessimistic_update(
if (page_zip
&& optim_err != DB_ZIP_OVERFLOW
&& !dict_index_is_clust(index)
- && page_is_leaf(page)) {
+ && page_is_leaf(block->frame)) {
ut_ad(!index->table->is_temporary());
ibuf_update_free_bits_zip(block, mtr);
}
@@ -4910,7 +4902,7 @@ btr_cur_pessimistic_update(
/* We have to set appropriate extern storage bits in the new
record to be inserted: we have to remember which fields were such */
- ut_ad(!page_is_comp(page) || !rec_get_node_ptr_flag(rec));
+ ut_ad(!page_is_comp(block->frame) || !rec_get_node_ptr_flag(rec));
ut_ad(rec_offs_validate(rec, index, *offsets));
if (index->is_primary()) {
n_ext += btr_push_update_extern_fields(
@@ -4933,12 +4925,12 @@ btr_cur_pessimistic_update(
DEBUG_SYNC_C("blob_rollback_middle");
btr_rec_free_updated_extern_fields(
- index, rec, page_zip, *offsets, update, true, mtr);
+ index, rec, block, *offsets, update, true, mtr);
}
if (page_zip_rec_needs_ext(
rec_get_converted_size(index, new_entry, n_ext),
- page_is_comp(page),
+ page_is_comp(block->frame),
dict_index_get_n_fields(index),
block->zip_size())
|| (UNIV_UNLIKELY(update->is_alter_metadata())
@@ -4954,14 +4946,15 @@ btr_cur_pessimistic_update(
BTR_KEEP_IBUF_BITMAP. */
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip
- || page_zip_validate(page_zip, page, index));
+ || page_zip_validate(page_zip, block->frame,
+ index));
#endif /* UNIV_ZIP_DEBUG */
index->table->space->release_free_extents(n_reserved);
err = DB_TOO_BIG_RECORD;
goto err_exit;
}
- ut_ad(page_is_leaf(page));
+ ut_ad(page_is_leaf(block->frame));
ut_ad(dict_index_is_clust(index));
ut_ad(flags & BTR_KEEP_POS_FLAG);
}
@@ -4996,10 +4989,9 @@ btr_cur_pessimistic_update(
btr_cur_write_sys(new_entry, index, trx_id, roll_ptr);
}
- if (!page_zip) {
- max_ins_size = page_get_max_insert_size_after_reorganize(
- page, 1);
- }
+ const ulint max_ins_size = page_zip
+ ? 0 : page_get_max_insert_size_after_reorganize(block->frame,
+ 1);
if (UNIV_UNLIKELY(is_metadata)) {
ut_ad(new_entry->is_metadata());
@@ -5027,7 +5019,7 @@ btr_cur_pessimistic_update(
}
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page, index));
+ ut_a(!page_zip || page_zip_validate(page_zip, block->frame, index));
#endif /* UNIV_ZIP_DEBUG */
page_cursor = btr_cur_get_page_cur(cursor);
@@ -5058,8 +5050,8 @@ btr_cur_pessimistic_update(
|| rec_is_alter_metadata(rec, *index)) {
/* The new inserted record owns its possible externally
stored fields */
- btr_cur_unmark_extern_fields(
- page_zip, rec, index, *offsets, mtr);
+ btr_cur_unmark_extern_fields(btr_cur_get_block(cursor),
+ rec, index, *offsets, mtr);
} else {
/* In delete-marked records, DB_TRX_ID must
always refer to an existing undo log record. */
@@ -5067,7 +5059,7 @@ btr_cur_pessimistic_update(
}
bool adjust = big_rec_vec && (flags & BTR_KEEP_POS_FLAG);
- ut_ad(!adjust || page_is_leaf(page));
+ ut_ad(!adjust || page_is_leaf(block->frame));
if (btr_cur_compress_if_useful(cursor, adjust, mtr)) {
if (adjust) {
@@ -5075,7 +5067,7 @@ btr_cur_pessimistic_update(
true, *offsets);
}
} else if (!dict_index_is_clust(index)
- && page_is_leaf(page)) {
+ && page_is_leaf(block->frame)) {
/* Update the free bits in the insert buffer.
This is the same block which was skipped by
BTR_KEEP_IBUF_BITMAP. */
@@ -5090,7 +5082,7 @@ btr_cur_pessimistic_update(
if (!srv_read_only_mode
&& !big_rec_vec
- && page_is_leaf(page)
+ && page_is_leaf(block->frame)
&& !dict_index_is_online_ddl(index)) {
mtr_memo_release(mtr, dict_index_get_lock(index),
@@ -5115,13 +5107,13 @@ btr_cur_pessimistic_update(
BTR_KEEP_IBUF_BITMAP. */
if (!dict_index_is_clust(index)
&& !index->table->is_temporary()
- && page_is_leaf(page)) {
+ && page_is_leaf(block->frame)) {
ibuf_reset_free_bits(block);
}
}
if (big_rec_vec != NULL) {
- ut_ad(page_is_leaf(page));
+ ut_ad(page_is_leaf(block->frame));
ut_ad(dict_index_is_clust(index));
ut_ad(flags & BTR_KEEP_POS_FLAG);
@@ -5170,28 +5162,20 @@ btr_cur_pessimistic_update(
/* Update PAGE_MAX_TRX_ID in the index page header.
It was not updated by btr_cur_pessimistic_insert()
because of BTR_NO_LOCKING_FLAG. */
- buf_block_t* rec_block;
-
- rec_block = btr_cur_get_block(cursor);
-
- page_update_max_trx_id(rec_block,
- buf_block_get_page_zip(rec_block),
+ page_update_max_trx_id(btr_cur_get_block(cursor),
+ btr_cur_get_page_zip(cursor),
trx_id, mtr);
}
if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets))) {
/* The new inserted record owns its possible externally
stored fields */
- buf_block_t* rec_block = btr_cur_get_block(cursor);
-
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page, index));
- page = buf_block_get_frame(rec_block);
+ ut_a(!page_zip || page_zip_validate(page_zip, block->frame,
+ index));
#endif /* UNIV_ZIP_DEBUG */
- page_zip = buf_block_get_page_zip(rec_block);
-
- btr_cur_unmark_extern_fields(page_zip,
- rec, index, *offsets, mtr);
+ btr_cur_unmark_extern_fields(btr_cur_get_block(cursor), rec,
+ index, *offsets, mtr);
} else {
/* In delete-marked records, DB_TRX_ID must
always refer to an existing undo log record. */
@@ -5222,7 +5206,8 @@ btr_cur_pessimistic_update(
return_after_reservations:
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page, index));
+ ut_a(!page_zip || page_zip_validate(btr_cur_get_page_zip(cursor),
+ btr_cur_get_page(cursor), index));
#endif /* UNIV_ZIP_DEBUG */
index->table->space->release_free_extents(n_reserved);
@@ -5393,7 +5378,6 @@ btr_cur_del_mark_set_clust_rec(
{
roll_ptr_t roll_ptr;
dberr_t err;
- page_zip_des_t* page_zip;
trx_t* trx;
ut_ad(dict_index_is_clust(index));
@@ -5431,7 +5415,7 @@ btr_cur_del_mark_set_clust_rec(
the adaptive hash index does not depend on the delete-mark
and the delete-mark is being updated in place. */
- page_zip = buf_block_get_page_zip(block);
+ page_zip_des_t* page_zip = buf_block_get_page_zip(block);
btr_rec_set_deleted_flag(rec, page_zip, TRUE);
@@ -5905,7 +5889,7 @@ btr_cur_pessimistic_delete(
if (rec_offs_any_extern(offsets)) {
btr_rec_free_externally_stored_fields(index,
- rec, offsets, page_zip,
+ rec, offsets, block,
rollback, mtr);
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
@@ -7144,13 +7128,12 @@ static
void
btr_cur_set_ownership_of_extern_field(
/*==================================*/
- page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
- part will be updated, or NULL */
+ buf_block_t* block, /*!< in/out: index page */
rec_t* rec, /*!< in/out: clustered index record */
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
ulint i, /*!< in: field number */
- ibool val, /*!< in: value to set */
+ bool val, /*!< in: value to set */
mtr_t* mtr) /*!< in: mtr, or NULL if not logged */
{
byte* data;
@@ -7174,15 +7157,14 @@ btr_cur_set_ownership_of_extern_field(
byte_val |= BTR_EXTERN_OWNER_FLAG;
}
- if (page_zip) {
+ if (UNIV_LIKELY_NULL(block->page.zip.data)) {
mach_write_to_1(data + local_len + BTR_EXTERN_LEN, byte_val);
- page_zip_write_blob_ptr(page_zip, rec, index, offsets, i, mtr);
- } else if (mtr != NULL) {
-
- mlog_write_ulint(data + local_len + BTR_EXTERN_LEN, byte_val,
- MLOG_1BYTE, mtr);
+ page_zip_write_blob_ptr(&block->page.zip, rec, index, offsets,
+ i, mtr);
} else {
- mach_write_to_1(data + local_len + BTR_EXTERN_LEN, byte_val);
+ mtr->write<1,mtr_t::OPT>(*block,
+ data + local_len + BTR_EXTERN_LEN,
+ byte_val);
}
}
@@ -7194,8 +7176,7 @@ to free the field. */
void
btr_cur_disown_inherited_fields(
/*============================*/
- page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
- part will be updated, or NULL */
+ buf_block_t* block, /*!< in/out: index page */
rec_t* rec, /*!< in/out: record in a clustered index */
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
@@ -7212,7 +7193,7 @@ btr_cur_disown_inherited_fields(
if (rec_offs_nth_extern(offsets, i)
&& !upd_get_field_by_field_no(update, i, false)) {
btr_cur_set_ownership_of_extern_field(
- page_zip, rec, index, offsets, i, FALSE, mtr);
+ block, rec, index, offsets, i, false, mtr);
}
}
}
@@ -7225,29 +7206,23 @@ static
void
btr_cur_unmark_extern_fields(
/*=========================*/
- page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
- part will be updated, or NULL */
+ buf_block_t* block, /*!< in/out: index page */
rec_t* rec, /*!< in/out: record in a clustered index */
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
mtr_t* mtr) /*!< in: mtr, or NULL if not logged */
{
- ulint n;
- ulint i;
-
ut_ad(!rec_offs_comp(offsets) || !rec_get_node_ptr_flag(rec));
- n = rec_offs_n_fields(offsets);
-
if (!rec_offs_any_extern(offsets)) {
-
return;
}
- for (i = 0; i < n; i++) {
- if (rec_offs_nth_extern(offsets, i)) {
+ const ulint n = rec_offs_n_fields(offsets);
+ for (ulint i = 0; i < n; i++) {
+ if (rec_offs_nth_extern(offsets, i)) {
btr_cur_set_ownership_of_extern_field(
- page_zip, rec, index, offsets, i, TRUE, mtr);
+ block, rec, index, offsets, i, true, mtr);
}
}
}
@@ -7648,7 +7623,6 @@ btr_store_big_rec_extern_fields(
for (ulint blob_npages = 0;; ++blob_npages) {
buf_block_t* block;
- page_t* page;
const ulint commit_freq = 4;
ulint r_extents;
@@ -7711,11 +7685,9 @@ btr_store_big_rec_extern_fields(
ut_a(block != NULL);
page_no = block->page.id.page_no();
- page = buf_block_get_frame(block);
if (prev_page_no != FIL_NULL) {
buf_block_t* prev_block;
- page_t* prev_page;
prev_block = buf_page_get(
page_id_t(space_id, prev_page_no),
@@ -7724,23 +7696,25 @@ btr_store_big_rec_extern_fields(
buf_block_dbg_add_level(prev_block,
SYNC_EXTERN_STORAGE);
- prev_page = buf_block_get_frame(prev_block);
if (page_zip) {
- mlog_write_ulint(
- prev_page + FIL_PAGE_NEXT,
- page_no, MLOG_4BYTES, &mtr);
- memcpy(buf_block_get_page_zip(
- prev_block)
- ->data + FIL_PAGE_NEXT,
- prev_page + FIL_PAGE_NEXT, 4);
+ mtr.write<4>(*prev_block,
+ prev_block->frame
+ + FIL_PAGE_NEXT,
+ page_no);
+ memcpy_aligned<4>(
+ buf_block_get_page_zip(
+ prev_block)
+ ->data + FIL_PAGE_NEXT,
+ prev_block->frame
+ + FIL_PAGE_NEXT, 4);
} else {
- mlog_write_ulint(
- prev_page + FIL_PAGE_DATA
- + BTR_BLOB_HDR_NEXT_PAGE_NO,
- page_no, MLOG_4BYTES, &mtr);
+ mtr.write<4>(*prev_block,
+ BTR_BLOB_HDR_NEXT_PAGE_NO
+ + FIL_PAGE_DATA
+ + prev_block->frame,
+ page_no);
}
-
} else if (dict_index_is_online_ddl(index)) {
row_log_table_blob_alloc(index, page_no);
}
@@ -7751,7 +7725,7 @@ btr_store_big_rec_extern_fields(
/* Write FIL_PAGE_TYPE to the redo log
separately, before logging any other
- changes to the page, so that the debug
+ changes to the block, so that the debug
assertions in
recv_parse_or_apply_log_rec_body() can
be made simpler. Before InnoDB Plugin
@@ -7759,13 +7733,13 @@ btr_store_big_rec_extern_fields(
FIL_PAGE_TYPE was logged as part of
the mlog_log_string() below. */
- mlog_write_ulint(page + FIL_PAGE_TYPE,
- prev_page_no == FIL_NULL
- ? FIL_PAGE_TYPE_ZBLOB
- : FIL_PAGE_TYPE_ZBLOB2,
- MLOG_2BYTES, &mtr);
+ mtr.write<2>(*block,
+ block->frame + FIL_PAGE_TYPE,
+ prev_page_no == FIL_NULL
+ ? FIL_PAGE_TYPE_ZBLOB
+ : FIL_PAGE_TYPE_ZBLOB2);
- c_stream.next_out = page
+ c_stream.next_out = block->frame
+ FIL_PAGE_DATA;
c_stream.avail_out = static_cast<uInt>(
payload_size_zip);
@@ -7799,15 +7773,13 @@ btr_store_big_rec_extern_fields(
Number */
ut_ad(!dict_index_is_spatial(index));
- mlog_write_ulint(page
- + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
- space_id,
- MLOG_4BYTES, &mtr);
- mlog_write_ulint(page
- + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4,
- rec_page_no,
- MLOG_4BYTES, &mtr);
- mlog_log_string(page
+ mtr.write<4>(*block, block->frame
+ + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
+ space_id);
+ mtr.write<4>(*block, block->frame
+ + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4,
+ rec_page_no);
+ mlog_log_string(block->frame
+ FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
page_zip_get_size(page_zip)
- FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
@@ -7828,7 +7800,7 @@ btr_store_big_rec_extern_fields(
ut_ad(blob_page_zip);
ut_ad(page_zip_get_size(blob_page_zip)
== page_zip_get_size(page_zip));
- memcpy(blob_page_zip->data, page,
+ memcpy(blob_page_zip->data, block->frame,
page_zip_get_size(page_zip));
if (err == Z_OK && prev_page_no != FIL_NULL) {
@@ -7880,9 +7852,9 @@ next_zip_page:
break;
}
} else {
- mlog_write_ulint(page + FIL_PAGE_TYPE,
- FIL_PAGE_TYPE_BLOB,
- MLOG_2BYTES, &mtr);
+ mtr.write<2>(*block, FIL_PAGE_TYPE
+ + block->frame,
+ FIL_PAGE_TYPE_BLOB);
if (extern_len > payload_size) {
store_len = payload_size;
@@ -7890,47 +7862,44 @@ next_zip_page:
store_len = extern_len;
}
- mlog_write_string(page + FIL_PAGE_DATA
- + BTR_BLOB_HDR_SIZE,
+ mlog_write_string(FIL_PAGE_DATA
+ + BTR_BLOB_HDR_SIZE
+ + block->frame,
(const byte*)
big_rec_vec->fields[i].data
+ big_rec_vec->fields[i].len
- extern_len,
store_len, &mtr);
- mlog_write_ulint(page + FIL_PAGE_DATA
- + BTR_BLOB_HDR_PART_LEN,
- store_len, MLOG_4BYTES, &mtr);
- mlog_write_ulint(page + FIL_PAGE_DATA
- + BTR_BLOB_HDR_NEXT_PAGE_NO,
- FIL_NULL, MLOG_4BYTES, &mtr);
+ mtr.write<4>(*block, BTR_BLOB_HDR_PART_LEN
+ + FIL_PAGE_DATA + block->frame,
+ store_len);
+ compile_time_assert(FIL_NULL == 0xffffffff);
+ mlog_memset(block, BTR_BLOB_HDR_NEXT_PAGE_NO
+ + FIL_PAGE_DATA, 4, 0xff, &mtr);
extern_len -= store_len;
ut_ad(!mach_read_from_4(BTR_EXTERN_LEN
+ field_ref));
- mlog_write_ulint(field_ref
- + BTR_EXTERN_LEN + 4,
- big_rec_vec->fields[i].len
- - extern_len,
- MLOG_4BYTES, &mtr);
+ mtr.write<4>(*rec_block,
+ BTR_EXTERN_LEN + 4 + field_ref,
+ big_rec_vec->fields[i].len
+ - extern_len);
if (prev_page_no == FIL_NULL) {
ut_ad(blob_npages == 0);
- mlog_write_ulint(field_ref
- + BTR_EXTERN_SPACE_ID,
- space_id, MLOG_4BYTES,
- &mtr);
-
- mlog_write_ulint(field_ref
- + BTR_EXTERN_PAGE_NO,
- page_no, MLOG_4BYTES,
- &mtr);
-
- mlog_write_ulint(field_ref
- + BTR_EXTERN_OFFSET,
- FIL_PAGE_DATA,
- MLOG_4BYTES,
- &mtr);
+ mtr.write<4,mtr_t::OPT>(
+ *rec_block,
+ field_ref + BTR_EXTERN_SPACE_ID,
+ space_id);
+
+ mtr.write<4>(*rec_block, field_ref
+ + BTR_EXTERN_PAGE_NO,
+ page_no);
+
+ mtr.write<4>(*rec_block, field_ref
+ + BTR_EXTERN_OFFSET,
+ FIL_PAGE_DATA);
}
prev_page_no = page_no;
@@ -8038,8 +8007,7 @@ btr_free_externally_stored_field(
page_zip_write_blob_ptr(), or NULL */
const ulint* offsets, /*!< in: rec_get_offsets(rec, index),
or NULL */
- page_zip_des_t* page_zip, /*!< in: compressed page corresponding
- to rec, or NULL if rec == NULL */
+ buf_block_t* block, /*!< in/out: page of field_ref */
ulint i, /*!< in: field number of field_ref;
ignored if rec == NULL */
bool rollback, /*!< in: performing rollback? */
@@ -8084,10 +8052,8 @@ btr_free_externally_stored_field(
const ulint ext_zip_size = index->table->space->zip_size();
const ulint rec_zip_size = rec ? ext_zip_size : 0;
- if (rec == NULL) {
- /* This is a call from row_purge_upd_exist_or_extern(). */
- ut_ad(!page_zip);
- }
+ /* !rec holds in a call from purge when field_ref is in an undo page */
+ ut_ad(rec || !block->page.zip.data);
for (;;) {
#ifdef UNIV_DEBUG
@@ -8156,24 +8122,23 @@ btr_free_externally_stored_field(
btr_page_free(index, ext_block, &mtr, true);
- if (page_zip != NULL) {
+ if (UNIV_LIKELY_NULL(block->page.zip.data)) {
mach_write_to_4(field_ref + BTR_EXTERN_PAGE_NO,
next_page_no);
- mach_write_to_4(field_ref + BTR_EXTERN_LEN + 4,
- 0);
- page_zip_write_blob_ptr(page_zip, rec, index,
+ memset(field_ref + BTR_EXTERN_LEN, 0, 4);
+ page_zip_write_blob_ptr(&block->page.zip,
+ rec, index,
offsets, i, &mtr);
} else {
- mlog_write_ulint(field_ref
- + BTR_EXTERN_PAGE_NO,
- next_page_no,
- MLOG_4BYTES, &mtr);
- mlog_write_ulint(field_ref
- + BTR_EXTERN_LEN + 4, 0,
- MLOG_4BYTES, &mtr);
+ mtr.write<4>(*block,
+ BTR_EXTERN_PAGE_NO + field_ref,
+ next_page_no);
+ mtr.write<4>(*block,
+ BTR_EXTERN_LEN + 4 + field_ref,
+ 0U);
}
} else {
- ut_a(!page_zip);
+ ut_ad(!block->page.zip.data);
btr_check_blob_fil_page_type(space_id, page_no, page,
FALSE);
@@ -8182,17 +8147,16 @@ btr_free_externally_stored_field(
+ BTR_BLOB_HDR_NEXT_PAGE_NO);
btr_page_free(index, ext_block, &mtr, true);
- mlog_write_ulint(field_ref + BTR_EXTERN_PAGE_NO,
- next_page_no,
- MLOG_4BYTES, &mtr);
+ mtr.write<4>(*block, BTR_EXTERN_PAGE_NO + field_ref,
+ next_page_no);
/* Zero out the BLOB length. If the server
crashes during the execution of this function,
trx_rollback_all_recovered() could
dereference the half-deleted BLOB, fetching a
wrong prefix for the BLOB. */
- mlog_write_ulint(field_ref + BTR_EXTERN_LEN + 4,
- 0,
- MLOG_4BYTES, &mtr);
+ mtr.write<4,mtr_t::OPT>(*block,
+ BTR_EXTERN_LEN + 4 + field_ref,
+ 0U);
}
/* Commit mtr and release the BLOB block to save memory. */
@@ -8210,8 +8174,7 @@ btr_rec_free_externally_stored_fields(
tree MUST be X-latched */
rec_t* rec, /*!< in/out: record */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
- page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
- part will be updated, or NULL */
+ buf_block_t* block, /*!< in: index page of rec */
bool rollback,/*!< in: performing rollback? */
mtr_t* mtr) /*!< in: mini-transaction handle which contains
an X-latch to record page and to the index
@@ -8233,7 +8196,7 @@ btr_rec_free_externally_stored_fields(
if (rec_offs_nth_extern(offsets, i)) {
btr_free_externally_stored_field(
index, btr_rec_get_field_ref(rec, offsets, i),
- rec, offsets, page_zip, i, rollback, mtr);
+ rec, offsets, block, i, rollback, mtr);
}
}
}
@@ -8248,8 +8211,7 @@ btr_rec_free_updated_extern_fields(
dict_index_t* index, /*!< in: index of rec; the index tree MUST be
X-latched */
rec_t* rec, /*!< in/out: record */
- page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
- part will be updated, or NULL */
+ buf_block_t* block, /*!< in: index page of rec */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
const upd_t* update, /*!< in: update vector */
bool rollback,/*!< in: performing rollback? */
@@ -8277,7 +8239,7 @@ btr_rec_free_updated_extern_fields(
btr_free_externally_stored_field(
index, data + len - BTR_EXTERN_FIELD_REF_SIZE,
- rec, offsets, page_zip,
+ rec, offsets, block,
ufield->field_no, rollback, mtr);
}
}
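
The btr0cur.cc hunks above all follow one mechanical pattern: a bare byte pointer plus an MLOG_1BYTE/MLOG_2BYTES/MLOG_4BYTES size code becomes a compile-time size on mtr_t::write<>(), and the owning buf_block_t is passed so the mini-transaction can check that the pointer really lies inside that page and can drop writes that would not change it (the mtr_t::OPT call sites), or force a record even for an unchanged value (mtr_t::FORCED, see the fil0crypt.cc hunk further below). The following stand-alone sketch only models that skip/force behaviour; block_t, the big-endian encoding loop and the empty log_append() are simplified stand-ins, not the InnoDB definitions, and the real member function also distinguishes NORMAL from OPT call sites.

#include <cstdint>
#include <cstring>

struct block_t { unsigned char frame[16384]; };   // stand-in for buf_block_t

struct mtr_t
{
  enum write_type { NORMAL, OPT, FORCED };        // names as used in the diff

  /** Write an N-byte big-endian value at ptr inside block.frame.
  Unless FORCED, a write that would leave the bytes unchanged is dropped,
  which is the optimization this patch introduces. */
  template<unsigned N, write_type w = NORMAL>
  void write(block_t &block, unsigned char *ptr, uint64_t val)
  {
    unsigned char buf[N];
    for (unsigned i = 0; i < N; i++)              // big-endian encoding
      buf[i] = static_cast<unsigned char>(val >> 8 * (N - 1 - i));
    if (w != FORCED && !std::memcmp(ptr, buf, N))
      return;                                     // nothing would change
    std::memcpy(ptr, buf, N);
    log_append(block, ptr, N);                    // record the changed bytes
  }
private:
  void log_append(block_t&, const unsigned char*, unsigned) {}  // no-op here
};

With this shape, a call such as mtr.write<4,mtr_t::OPT>(*block, ptr, val) encodes the length at compile time and keeps the "did anything change" check in one place; judging by the hunks, OPT marks the call sites where an unchanged value is legitimate rather than a bug.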
diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc
index 4605aa0c5e1..72f82a88f49 100644
--- a/storage/innobase/buf/buf0dblwr.cc
+++ b/storage/innobase/buf/buf0dblwr.cc
@@ -71,25 +71,13 @@ buf_dblwr_page_inside(
return(FALSE);
}
-/****************************************************************//**
-Calls buf_page_get() on the TRX_SYS_PAGE and returns a pointer to the
-doublewrite buffer within it.
-@return pointer to the doublewrite buffer within the filespace header
-page. */
-UNIV_INLINE
-byte*
-buf_dblwr_get(
-/*==========*/
- mtr_t* mtr) /*!< in/out: MTR to hold the page latch */
+/** @return the TRX_SYS page */
+inline buf_block_t *buf_dblwr_trx_sys_get(mtr_t *mtr)
{
- buf_block_t* block;
-
- block = buf_page_get(page_id_t(TRX_SYS_SPACE, TRX_SYS_PAGE_NO),
- 0, RW_X_LATCH, mtr);
-
- buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
-
- return(buf_block_get_frame(block) + TRX_SYS_DOUBLEWRITE);
+ buf_block_t *block= buf_page_get(page_id_t(TRX_SYS_SPACE, TRX_SYS_PAGE_NO),
+ 0, RW_X_LATCH, mtr);
+ buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
+ return block;
}
/********************************************************************//**
@@ -106,12 +94,7 @@ buf_dblwr_sync_datafiles()
/****************************************************************//**
Creates or initializes the doublewrite buffer at a database start. */
-static
-void
-buf_dblwr_init(
-/*===========*/
- byte* doublewrite) /*!< in: pointer to the doublewrite buf
- header on trx sys page */
+static void buf_dblwr_init(const byte *doublewrite)
{
ulint buf_size;
@@ -164,7 +147,6 @@ buf_dblwr_create()
{
buf_block_t* block2;
buf_block_t* new_block;
- byte* doublewrite;
byte* fseg_header;
ulint page_no;
ulint prev_page_no;
@@ -180,14 +162,15 @@ start_again:
mtr.start();
buf_dblwr_being_created = TRUE;
- doublewrite = buf_dblwr_get(&mtr);
+ buf_block_t *trx_sys_block = buf_dblwr_trx_sys_get(&mtr);
- if (mach_read_from_4(doublewrite + TRX_SYS_DOUBLEWRITE_MAGIC)
+ if (mach_read_from_4(TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_MAGIC
+ + trx_sys_block->frame)
== TRX_SYS_DOUBLEWRITE_MAGIC_N) {
/* The doublewrite buffer has already been created:
just read in some numbers */
- buf_dblwr_init(doublewrite);
+ buf_dblwr_init(TRX_SYS_DOUBLEWRITE + trx_sys_block->frame);
mtr.commit();
buf_dblwr_being_created = FALSE;
@@ -229,7 +212,8 @@ too_small:
buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK);
- fseg_header = doublewrite + TRX_SYS_DOUBLEWRITE_FSEG;
+ fseg_header = TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_FSEG
+ + trx_sys_block->frame;
prev_page_no = 0;
for (i = 0; i < TRX_SYS_DOUBLEWRITE_BLOCKS * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE
@@ -265,30 +249,38 @@ too_small:
recv_parse_or_apply_log_rec_body() will see a valid
page type. The flushes of new_block are actually
unnecessary here. */
- ut_d(mlog_write_ulint(FIL_PAGE_TYPE + new_block->frame,
- FIL_PAGE_TYPE_SYS, MLOG_2BYTES, &mtr));
+ ut_d(mtr.write<2>(*new_block,
+ FIL_PAGE_TYPE + new_block->frame,
+ FIL_PAGE_TYPE_SYS));
if (i == FSP_EXTENT_SIZE / 2) {
ut_a(page_no == FSP_EXTENT_SIZE);
- mlog_write_ulint(doublewrite
- + TRX_SYS_DOUBLEWRITE_BLOCK1,
- page_no, MLOG_4BYTES, &mtr);
- mlog_write_ulint(doublewrite
- + TRX_SYS_DOUBLEWRITE_REPEAT
- + TRX_SYS_DOUBLEWRITE_BLOCK1,
- page_no, MLOG_4BYTES, &mtr);
+ mtr.write<4>(*trx_sys_block,
+ TRX_SYS_DOUBLEWRITE
+ + TRX_SYS_DOUBLEWRITE_BLOCK1
+ + trx_sys_block->frame,
+ page_no);
+ mtr.write<4>(*trx_sys_block,
+ TRX_SYS_DOUBLEWRITE
+ + TRX_SYS_DOUBLEWRITE_REPEAT
+ + TRX_SYS_DOUBLEWRITE_BLOCK1
+ + trx_sys_block->frame,
+ page_no);
} else if (i == FSP_EXTENT_SIZE / 2
+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
ut_a(page_no == 2 * FSP_EXTENT_SIZE);
- mlog_write_ulint(doublewrite
- + TRX_SYS_DOUBLEWRITE_BLOCK2,
- page_no, MLOG_4BYTES, &mtr);
- mlog_write_ulint(doublewrite
- + TRX_SYS_DOUBLEWRITE_REPEAT
- + TRX_SYS_DOUBLEWRITE_BLOCK2,
- page_no, MLOG_4BYTES, &mtr);
-
+ mtr.write<4>(*trx_sys_block,
+ TRX_SYS_DOUBLEWRITE
+ + TRX_SYS_DOUBLEWRITE_BLOCK2
+ + trx_sys_block->frame,
+ page_no);
+ mtr.write<4>(*trx_sys_block,
+ TRX_SYS_DOUBLEWRITE
+ + TRX_SYS_DOUBLEWRITE_REPEAT
+ + TRX_SYS_DOUBLEWRITE_BLOCK2
+ + trx_sys_block->frame,
+ page_no);
} else if (i > FSP_EXTENT_SIZE / 2) {
ut_a(page_no == prev_page_no + 1);
}
@@ -303,29 +295,32 @@ too_small:
lock the fseg header too many times. Since
this code is not done while any other threads
are active, restart the MTR occasionally. */
- mtr_commit(&mtr);
- mtr_start(&mtr);
- doublewrite = buf_dblwr_get(&mtr);
- fseg_header = doublewrite
- + TRX_SYS_DOUBLEWRITE_FSEG;
+ mtr.commit();
+ mtr.start();
+ trx_sys_block = buf_dblwr_trx_sys_get(&mtr);
+ fseg_header = TRX_SYS_DOUBLEWRITE
+ + TRX_SYS_DOUBLEWRITE_FSEG
+ + trx_sys_block->frame;
}
prev_page_no = page_no;
}
- mlog_write_ulint(doublewrite + TRX_SYS_DOUBLEWRITE_MAGIC,
- TRX_SYS_DOUBLEWRITE_MAGIC_N,
- MLOG_4BYTES, &mtr);
- mlog_write_ulint(doublewrite + TRX_SYS_DOUBLEWRITE_MAGIC
- + TRX_SYS_DOUBLEWRITE_REPEAT,
- TRX_SYS_DOUBLEWRITE_MAGIC_N,
- MLOG_4BYTES, &mtr);
-
- mlog_write_ulint(doublewrite
- + TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED,
- TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED_N,
- MLOG_4BYTES, &mtr);
- mtr_commit(&mtr);
+ mtr.write<4>(*trx_sys_block,
+ TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_MAGIC
+ + trx_sys_block->frame,
+ TRX_SYS_DOUBLEWRITE_MAGIC_N);
+ mtr.write<4>(*trx_sys_block,
+ TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_MAGIC
+ + TRX_SYS_DOUBLEWRITE_REPEAT
+ + trx_sys_block->frame,
+ TRX_SYS_DOUBLEWRITE_MAGIC_N);
+
+ mtr.write<4>(*trx_sys_block,
+ TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED
+ + trx_sys_block->frame,
+ TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED_N);
+ mtr.commit();
/* Flush the modified pages to disk and make a checkpoint */
log_make_checkpoint();
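
The buf0dblwr.cc hunk shows the other half of the refactoring: a helper that used to hand back a raw pointer into the TRX_SYS page now returns the buf_block_t itself (buf_dblwr_trx_sys_get()), and each caller adds the fixed TRX_SYS_DOUBLEWRITE offset to block->frame so that the block is still in hand when mtr_t::write() needs it. A hypothetical caller following that pattern could look as below; it assumes the InnoDB headers, reuses only names that appear in the hunks above, and is not code from the patch.

mtr_t mtr;
mtr.start();
buf_block_t *b = buf_dblwr_trx_sys_get(&mtr);           // TRX_SYS page, X-latched
// Field addresses are computed from the block frame plus compile-time offsets.
byte *magic = TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_MAGIC + b->frame;
// OPT: skip the redo record if the magic value is already in place.
mtr.write<4, mtr_t::OPT>(*b, magic, TRX_SYS_DOUBLEWRITE_MAGIC_N);
mtr.commit();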
diff --git a/storage/innobase/dict/dict0boot.cc b/storage/innobase/dict/dict0boot.cc
index cb763e6b099..f9d53267728 100644
--- a/storage/innobase/dict/dict0boot.cc
+++ b/storage/innobase/dict/dict0boot.cc
@@ -35,24 +35,13 @@ Created 4/18/1996 Heikki Tuuri
#include "log0recv.h"
#include "os0file.h"
-/**********************************************************************//**
-Gets a pointer to the dictionary header and x-latches its page.
-@return pointer to the dictionary header, page x-latched */
-dict_hdr_t*
-dict_hdr_get(
-/*=========*/
- mtr_t* mtr) /*!< in: mtr */
+/** @return the DICT_HDR block, x-latched */
+buf_block_t *dict_hdr_get(mtr_t* mtr)
{
- buf_block_t* block;
- dict_hdr_t* header;
-
- block = buf_page_get(page_id_t(DICT_HDR_SPACE, DICT_HDR_PAGE_NO),
- 0, RW_X_LATCH, mtr);
- header = DICT_HDR + buf_block_get_frame(block);
-
- buf_block_dbg_add_level(block, SYNC_DICT_HEADER);
-
- return(header);
+ buf_block_t *block= buf_page_get(page_id_t(DICT_HDR_SPACE, DICT_HDR_PAGE_NO),
+ 0, RW_X_LATCH, mtr);
+ buf_block_dbg_add_level(block, SYNC_DICT_HEADER);
+ return block;
}
/**********************************************************************//**
@@ -67,36 +56,41 @@ dict_hdr_get_new_id(
ulint* space_id) /*!< out: space id
(not assigned if NULL) */
{
- dict_hdr_t* dict_hdr;
ib_id_t id;
mtr_t mtr;
- mtr_start(&mtr);
- dict_hdr = dict_hdr_get(&mtr);
+ mtr.start();
+ buf_block_t* dict_hdr = dict_hdr_get(&mtr);
if (table_id) {
- id = mach_read_from_8(dict_hdr + DICT_HDR_TABLE_ID);
+ id = mach_read_from_8(DICT_HDR + DICT_HDR_TABLE_ID
+ + dict_hdr->frame);
id++;
- mlog_write_ull(dict_hdr + DICT_HDR_TABLE_ID, id, &mtr);
+ mtr.write<8>(*dict_hdr, DICT_HDR + DICT_HDR_TABLE_ID
+ + dict_hdr->frame, id);
*table_id = id;
}
if (index_id) {
- id = mach_read_from_8(dict_hdr + DICT_HDR_INDEX_ID);
+ id = mach_read_from_8(DICT_HDR + DICT_HDR_INDEX_ID
+ + dict_hdr->frame);
id++;
- mlog_write_ull(dict_hdr + DICT_HDR_INDEX_ID, id, &mtr);
+ mtr.write<8>(*dict_hdr, DICT_HDR + DICT_HDR_INDEX_ID
+ + dict_hdr->frame, id);
*index_id = id;
}
if (space_id) {
- *space_id = mach_read_from_4(dict_hdr + DICT_HDR_MAX_SPACE_ID);
+ *space_id = mach_read_from_4(DICT_HDR + DICT_HDR_MAX_SPACE_ID
+ + dict_hdr->frame);
if (fil_assign_new_space_id(space_id)) {
- mlog_write_ulint(dict_hdr + DICT_HDR_MAX_SPACE_ID,
- *space_id, MLOG_4BYTES, &mtr);
+ mtr.write<4>(*dict_hdr,
+ DICT_HDR + DICT_HDR_MAX_SPACE_ID
+ + dict_hdr->frame, *space_id);
}
}
- mtr_commit(&mtr);
+ mtr.commit();
}
/**********************************************************************//**
@@ -106,7 +100,6 @@ void
dict_hdr_flush_row_id(void)
/*=======================*/
{
- dict_hdr_t* dict_hdr;
row_id_t id;
mtr_t mtr;
@@ -114,13 +107,13 @@ dict_hdr_flush_row_id(void)
id = dict_sys.row_id;
- mtr_start(&mtr);
+ mtr.start();
- dict_hdr = dict_hdr_get(&mtr);
+ buf_block_t* d = dict_hdr_get(&mtr);
- mlog_write_ull(dict_hdr + DICT_HDR_ROW_ID, id, &mtr);
+ mtr.write<8>(*d, DICT_HDR + DICT_HDR_ROW_ID + d->frame, id);
- mtr_commit(&mtr);
+ mtr.commit();
}
/*****************************************************************//**
@@ -134,7 +127,6 @@ dict_hdr_create(
mtr_t* mtr) /*!< in: mtr */
{
buf_block_t* block;
- dict_hdr_t* dict_header;
ulint root_page_no;
ut_ad(mtr);
@@ -147,24 +139,22 @@ dict_hdr_create(
ut_a(DICT_HDR_PAGE_NO == block->page.id.page_no());
- dict_header = dict_hdr_get(mtr);
+ buf_block_t* d = dict_hdr_get(mtr);
/* Start counting row, table, index, and tree ids from
DICT_HDR_FIRST_ID */
- mlog_write_ull(dict_header + DICT_HDR_ROW_ID,
- DICT_HDR_FIRST_ID, mtr);
-
- mlog_write_ull(dict_header + DICT_HDR_TABLE_ID,
- DICT_HDR_FIRST_ID, mtr);
-
- mlog_write_ull(dict_header + DICT_HDR_INDEX_ID,
- DICT_HDR_FIRST_ID, mtr);
+ mtr->write<8>(*d, DICT_HDR + DICT_HDR_ROW_ID + d->frame,
+ DICT_HDR_FIRST_ID);
+ mtr->write<8>(*d, DICT_HDR + DICT_HDR_TABLE_ID + d->frame,
+ DICT_HDR_FIRST_ID);
+ mtr->write<8>(*d, DICT_HDR + DICT_HDR_INDEX_ID + d->frame,
+ DICT_HDR_FIRST_ID);
- ut_ad(mach_read_from_4(dict_header + DICT_HDR_MAX_SPACE_ID) == 0);
+ ut_ad(!mach_read_from_4(DICT_HDR + DICT_HDR_MAX_SPACE_ID + d->frame));
/* Obsolete, but we must initialize it anyway. */
- mlog_write_ulint(dict_header + DICT_HDR_MIX_ID_LOW,
- DICT_HDR_FIRST_ID, MLOG_4BYTES, mtr);
+ mtr->write<4>(*d, DICT_HDR + DICT_HDR_MIX_ID_LOW + d->frame,
+ DICT_HDR_FIRST_ID);
/* Create the B-tree roots for the clustered indexes of the basic
system tables */
@@ -178,8 +168,7 @@ dict_hdr_create(
return(FALSE);
}
- mlog_write_ulint(dict_header + DICT_HDR_TABLES, root_page_no,
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*d, DICT_HDR + DICT_HDR_TABLES + d->frame, root_page_no);
/*--------------------------*/
root_page_no = btr_create(DICT_UNIQUE,
fil_system.sys_space, DICT_TABLE_IDS_ID,
@@ -189,8 +178,8 @@ dict_hdr_create(
return(FALSE);
}
- mlog_write_ulint(dict_header + DICT_HDR_TABLE_IDS, root_page_no,
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*d, DICT_HDR + DICT_HDR_TABLE_IDS + d->frame,
+ root_page_no);
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_COLUMNS_ID,
@@ -200,8 +189,8 @@ dict_hdr_create(
return(FALSE);
}
- mlog_write_ulint(dict_header + DICT_HDR_COLUMNS, root_page_no,
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*d, DICT_HDR + DICT_HDR_COLUMNS + d->frame,
+ root_page_no);
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_INDEXES_ID,
@@ -211,8 +200,8 @@ dict_hdr_create(
return(FALSE);
}
- mlog_write_ulint(dict_header + DICT_HDR_INDEXES, root_page_no,
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*d, DICT_HDR + DICT_HDR_INDEXES + d->frame,
+ root_page_no);
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_FIELDS_ID,
@@ -222,8 +211,7 @@ dict_hdr_create(
return(FALSE);
}
- mlog_write_ulint(dict_header + DICT_HDR_FIELDS, root_page_no,
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*d, DICT_HDR + DICT_HDR_FIELDS + d->frame, root_page_no);
/*--------------------------*/
return(TRUE);
@@ -239,7 +227,6 @@ dict_boot(void)
{
dict_table_t* table;
dict_index_t* index;
- dict_hdr_t* dict_hdr;
mem_heap_t* heap;
mtr_t mtr;
@@ -271,7 +258,7 @@ dict_boot(void)
mutex_enter(&dict_sys.mutex);
/* Get the dictionary header */
- dict_hdr = dict_hdr_get(&mtr);
+ const byte* dict_hdr = &dict_hdr_get(&mtr)->frame[DICT_HDR];
/* Because we only write new row ids to disk-based data structure
(dictionary header) when it is divisible by
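
In dict0boot.cc the same treatment is applied to 8-byte fields: dict_hdr_get() now returns the x-latched buf_block_t, the DICT_HDR offset is added at each use, and mlog_write_ull() becomes mtr.write<8>(). A hedged sketch of the resulting read-modify-write idiom for one of the counters; the identifiers come from the hunks above, but the wrapper function is hypothetical and assumes the InnoDB headers.

// Bump DICT_HDR_TABLE_ID by one inside its own mini-transaction.
void bump_table_id_sketch()
{
    mtr_t mtr;
    mtr.start();
    buf_block_t *d = dict_hdr_get(&mtr);                  // DICT_HDR page, X-latched
    byte *ptr = DICT_HDR + DICT_HDR_TABLE_ID + d->frame;  // offset within the frame
    ib_id_t id = mach_read_from_8(ptr) + 1;               // read the current value
    mtr.write<8>(*d, ptr, id);                            // log and apply the new one
    mtr.commit();
}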
diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc
index 1d2c570e1a1..105ba6d1bef 100644
--- a/storage/innobase/dict/dict0crea.cc
+++ b/storage/innobase/dict/dict0crea.cc
@@ -373,16 +373,18 @@ dict_build_table_def_step(
mtr.start();
undo->table_id = trx->table_id;
undo->dict_operation = TRUE;
- page_t* page = trx_undo_page_get(
+ buf_block_t* block = trx_undo_page_get(
page_id_t(trx->rsegs.m_redo.rseg->space->id,
undo->hdr_page_no),
&mtr);
- mlog_write_ulint(page + undo->hdr_offset
- + TRX_UNDO_DICT_TRANS,
- TRUE, MLOG_1BYTE, &mtr);
- mlog_write_ull(page + undo->hdr_offset
- + TRX_UNDO_TABLE_ID,
- trx->table_id, &mtr);
+ mtr.write<1,mtr_t::OPT>(
+ *block,
+ block->frame + undo->hdr_offset
+ + TRX_UNDO_DICT_TRANS, 1U);
+ mtr.write<8,mtr_t::OPT>(
+ *block,
+ block->frame + undo->hdr_offset
+ + TRX_UNDO_TABLE_ID, trx->table_id);
mtr.commit();
log_write_up_to(mtr.commit_lsn(), true);
}
@@ -851,14 +853,13 @@ dict_create_index_tree_step(
err = DB_OUT_OF_FILE_SPACE; );
}
- ulint len;
- byte* data = rec_get_nth_field_old(btr_pcur_get_rec(&pcur),
+ ulint len;
+ byte* data = rec_get_nth_field_old(btr_pcur_get_rec(&pcur),
DICT_FLD__SYS_INDEXES__PAGE_NO,
&len);
ut_ad(len == 4);
- if (mach_read_from_4(data) != node->page_no) {
- mlog_write_ulint(data, node->page_no, MLOG_4BYTES, &mtr);
- }
+ mtr.write<4,mtr_t::OPT>(*btr_pcur_get_block(&pcur), data,
+ node->page_no);
mtr.commit();
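
The dict0crea.cc hunk above illustrates what the OPT variant buys: the old code compared the stored page number itself before calling mlog_write_ulint(), whereas the new code simply issues mtr.write<4,mtr_t::OPT>() and lets the mini-transaction drop the write when the value is unchanged. Schematically (a juxtaposition of the two hunk variants, not new code):

// Before: the caller decides whether logging is needed.
if (mach_read_from_4(data) != node->page_no)
    mlog_write_ulint(data, node->page_no, MLOG_4BYTES, &mtr);

// After: the comparison lives inside mtr_t::write(); OPT says an
// unchanged value is possible and is not an error.
mtr.write<4, mtr_t::OPT>(*btr_pcur_get_block(&pcur), data, node->page_no);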
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 1f434a882d5..b5cc82082aa 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -4013,7 +4013,7 @@ dict_set_corrupted(
if (len != 4) {
goto fail;
}
- mlog_write_ulint(field, index->type, MLOG_4BYTES, &mtr);
+ mtr.write<4>(*btr_cur_get_block(&cursor), field, index->type);
status = "Flagged";
} else {
fail:
@@ -4113,11 +4113,8 @@ dict_index_set_merge_threshold(
DICT_FLD__SYS_INDEXES__MERGE_THRESHOLD, &len);
ut_ad(len == 4);
-
- if (len == 4) {
- mlog_write_ulint(field, merge_threshold,
- MLOG_4BYTES, &mtr);
- }
+ mtr.write<4,mtr_t::OPT>(*btr_cur_get_block(&cursor), field,
+ merge_threshold);
}
mtr_commit(&mtr);
diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc
index 4aecc33a738..752c9fd2c85 100644
--- a/storage/innobase/dict/dict0load.cc
+++ b/storage/innobase/dict/dict0load.cc
@@ -1481,7 +1481,8 @@ void dict_check_tablespaces_and_store_max_id()
/* Initialize the max space_id from sys header */
mtr.start();
ulint max_space_id = mach_read_from_4(DICT_HDR_MAX_SPACE_ID
- + dict_hdr_get(&mtr));
+ + DICT_HDR
+ + dict_hdr_get(&mtr)->frame);
mtr.commit();
fil_set_max_space_id_if_bigger(max_space_id);
diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc
index a525c660102..044e6312025 100644
--- a/storage/innobase/fil/fil0crypt.cc
+++ b/storage/innobase/fil/fil0crypt.cc
@@ -2035,8 +2035,9 @@ fil_crypt_rotate_page(
modified = true;
/* force rotation by dummy updating page */
- mlog_write_ulint(frame + FIL_PAGE_SPACE_ID,
- space_id, MLOG_4BYTES, &mtr);
+ mtr.write<1,mtr_t::FORCED>(*block,
+ &frame[FIL_PAGE_SPACE_ID],
+ frame[FIL_PAGE_SPACE_ID]);
/* statistics */
state->crypt_stat.pages_modified++;
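
The fil0crypt.cc change above is the one place in this part of the patch where a write must not be optimized away: key rotation is triggered by a dummy update, so the value written (a byte of the page's own FIL_PAGE_SPACE_ID field) never changes, and the FORCED template argument requests a redo record anyway. A schematic comparison, using only identifiers from the hunk:

// Before: mlog_write_ulint() always logged, even for an identical value.
mlog_write_ulint(frame + FIL_PAGE_SPACE_ID, space_id, MLOG_4BYTES, &mtr);

// After: mtr_t::write() would drop an unchanged value, so the dummy write
// is forced explicitly; writing back a single unchanged byte suffices to
// make the page part of the mini-transaction and get it re-encrypted.
mtr.write<1, mtr_t::FORCED>(*block, &frame[FIL_PAGE_SPACE_ID],
                            frame[FIL_PAGE_SPACE_ID]);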
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index e97a4e3ae0d..c24c3025823 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -3899,8 +3899,8 @@ void fsp_flags_try_adjust(fil_space_t* space, ulint flags)
<< " to " << ib::hex(flags);
}
mtr.set_named_space(space);
- mlog_write_ulint(FSP_HEADER_OFFSET + FSP_SPACE_FLAGS
- + b->frame, flags, MLOG_4BYTES, &mtr);
+ mtr.write<4>(*b, FSP_HEADER_OFFSET + FSP_SPACE_FLAGS
+ + b->frame, flags);
}
func_exit:
mtr.commit();
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index b6b244a45ba..1d8e910092a 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -59,18 +59,6 @@ fsp_free_extent(
page_no_t offset,
mtr_t* mtr);
-/********************************************************************//**
-Marks a page used. The page must reside within the extents of the given
-segment. */
-static MY_ATTRIBUTE((nonnull))
-void
-fseg_mark_page_used(
-/*================*/
- fseg_inode_t* seg_inode,/*!< in: segment inode */
- page_no_t page, /*!< in: page offset */
- xdes_t* descr, /*!< in: extent descriptor */
- mtr_t* mtr); /*!< in/out: mini-transaction */
-
/** Returns the first extent descriptor for a segment.
We think of the extent lists of the segment catenated in the order
FSEG_FULL -> FSEG_NOT_FULL -> FSEG_FREE.
@@ -100,14 +88,15 @@ void
fsp_fill_free_list(
bool init_space,
fil_space_t* space,
- fsp_header_t* header,
+ buf_block_t* header,
mtr_t* mtr);
/** Allocates a single free page from a segment.
-This function implements the intelligent allocation strategy which tries
-to minimize file space fragmentation.
+This function implements the intelligent allocation strategy which tries to
+minimize file space fragmentation.
@param[in,out] space tablespace
@param[in,out] seg_inode segment inode
+@param[in,out] iblock segment inode page
@param[in] hint hint of which page would be desirable
@param[in] direction if the new page is needed because of
an index page split, and records are inserted there in order, into which
@@ -128,6 +117,7 @@ buf_block_t*
fseg_alloc_free_page_low(
fil_space_t* space,
fseg_inode_t* seg_inode,
+ buf_block_t* iblock,
ulint hint,
byte direction,
rw_lock_type_t rw_latch,
@@ -139,36 +129,35 @@ fseg_alloc_free_page_low(
)
MY_ATTRIBUTE((warn_unused_result));
-/** Gets a pointer to the space header and x-locks its page.
-@param[in] space tablespace
-@param[in,out] mtr mini-transaction
+/** Get the tablespace header block, SX-latched
+@param[in] space tablespace
+@param[in,out] mtr mini-transaction
@return pointer to the space header, page x-locked */
-inline fsp_header_t* fsp_get_space_header(const fil_space_t* space, mtr_t* mtr)
+inline buf_block_t *fsp_get_header(const fil_space_t *space, mtr_t *mtr)
{
- buf_block_t* block;
- fsp_header_t* header;
-
- ut_ad(space->purpose != FIL_TYPE_LOG);
-
- block = buf_page_get(page_id_t(space->id, 0), space->zip_size(),
- RW_SX_LATCH, mtr);
- header = FSP_HEADER_OFFSET + buf_block_get_frame(block);
- buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
-
- ut_ad(space->id == mach_read_from_4(FSP_SPACE_ID + header));
- return(header);
+ ut_ad(space->purpose != FIL_TYPE_LOG);
+
+ buf_block_t *block= buf_page_get(page_id_t(space->id, 0), space->zip_size(),
+ RW_SX_LATCH, mtr);
+ buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
+ ut_ad(space->id == mach_read_from_4(FSP_HEADER_OFFSET + FSP_SPACE_ID +
+ block->frame));
+ return block;
}
/** Set the XDES_FREE_BIT of a page.
@tparam free desired value of XDES_FREE_BIT
+@param[in] block extent descriptor block
@param[in,out] descr extent descriptor
@param[in] offset page offset within the extent
@param[in,out] mtr mini-transaction */
template<bool free>
-inline void xdes_set_free(xdes_t *descr, ulint offset, mtr_t *mtr)
+inline void xdes_set_free(const buf_block_t &block, xdes_t *descr,
+ ulint offset, mtr_t *mtr)
{
ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
ut_ad(offset < FSP_EXTENT_SIZE);
+ ut_ad(page_align(descr) == block.frame);
compile_time_assert(XDES_BITS_PER_PAGE == 2);
compile_time_assert(XDES_FREE_BIT == 0);
compile_time_assert(XDES_CLEAN_BIT == 1);
@@ -181,7 +170,7 @@ inline void xdes_set_free(xdes_t *descr, ulint offset, mtr_t *mtr)
byte val= free
? *b | 1 << (index & 7)
: *b & ~(1 << (index & 7));
- mlog_write_ulint(b, val, MLOG_1BYTE, mtr);
+ mtr->write<1>(block, b, val);
}
/**
@@ -226,22 +215,21 @@ inline bool xdes_is_full(const xdes_t *descr)
return FSP_EXTENT_SIZE == xdes_get_n_used(descr);
}
-/**********************************************************************//**
-Sets the state of an xdes. */
-UNIV_INLINE
-void
-xdes_set_state(
-/*===========*/
- xdes_t* descr, /*!< in/out: descriptor */
- ulint state, /*!< in: state to set */
- mtr_t* mtr) /*!< in/out: mini-transaction */
+/** Set the state of an extent descriptor.
+@param[in] block extent descriptor block
+@param[in,out] descr extent descriptor
+@param[in] state the state
+@param[in,out] mtr mini-transaction */
+inline void xdes_set_state(const buf_block_t &block, xdes_t *descr,
+ byte state, mtr_t *mtr)
{
- ut_ad(descr && mtr);
- ut_ad(state >= XDES_FREE);
- ut_ad(state <= XDES_FSEG);
- ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
-
- mlog_write_ulint(descr + XDES_STATE, state, MLOG_4BYTES, mtr);
+ ut_ad(descr && mtr);
+ ut_ad(state >= XDES_FREE);
+ ut_ad(state <= XDES_FSEG);
+ ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
+ ut_ad(page_align(descr) == block.frame);
+ ut_ad(mach_read_from_4(descr + XDES_STATE) <= XDES_FSEG);
+ mtr->write<1>(block, XDES_STATE + 3 + descr, state);
}
/**********************************************************************//**
@@ -266,48 +254,86 @@ xdes_get_state(
/**********************************************************************//**
Inits an extent descriptor to the free and clean state. */
-UNIV_INLINE
+inline void xdes_init(const buf_block_t &block, xdes_t *descr, mtr_t *mtr)
+{
+ ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
+ mlog_memset(descr + XDES_BITMAP, XDES_SIZE - XDES_BITMAP, 0xff, mtr);
+ xdes_set_state(block, descr, XDES_FREE, mtr);
+}
+
+/** Mark a page used in an extent descriptor.
+@param[in,out] seg_inode segment inode
+@param[in,out] iblock segment inode page
+@param[in] page page number
+@param[in,out] descr extent descriptor
+@param[in,out] xdes extent descriptor page
+@param[in,out] mtr mini-transaction */
+static MY_ATTRIBUTE((nonnull))
void
-xdes_init(
-/*======*/
- xdes_t* descr, /*!< in: descriptor */
- mtr_t* mtr) /*!< in/out: mini-transaction */
+fseg_mark_page_used(fseg_inode_t *seg_inode, buf_block_t *iblock,
+ ulint page, xdes_t *descr, buf_block_t *xdes, mtr_t *mtr)
{
- ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
- mlog_memset(descr + XDES_BITMAP, XDES_SIZE - XDES_BITMAP, 0xff, mtr);
- xdes_set_state(descr, XDES_FREE, mtr);
+ ut_ad(fil_page_get_type(iblock->frame) == FIL_PAGE_INODE);
+ ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
+ ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
+ ut_ad(!memcmp(seg_inode + FSEG_ID, descr + XDES_ID, 4));
+
+ const uint16_t xoffset= XDES_FLST_NODE + uint16_t(descr - xdes->frame);
+ const uint16_t ioffset= uint16_t(seg_inode - iblock->frame);
+
+ if (!xdes_get_n_used(descr))
+ {
+ /* We move the extent from the free list to the NOT_FULL list */
+ flst_remove(iblock, FSEG_FREE + ioffset, xdes, xoffset, mtr);
+ flst_add_last(iblock, FSEG_NOT_FULL + ioffset, xdes, xoffset, mtr);
+ }
+
+ ut_ad(xdes_is_free(descr, page % FSP_EXTENT_SIZE));
+
+ /* We mark the page as used */
+ xdes_set_free<false>(*xdes, descr, page % FSP_EXTENT_SIZE, mtr);
+
+ byte* p_not_full= seg_inode + FSEG_NOT_FULL_N_USED;
+ const uint32_t not_full_n_used= mach_read_from_4(p_not_full) + 1;
+ mtr->write<4>(*iblock, p_not_full, not_full_n_used);
+ if (xdes_is_full(descr))
+ {
+ /* We move the extent from the NOT_FULL list to the FULL list */
+ flst_remove(iblock, FSEG_NOT_FULL + ioffset, xdes, xoffset, mtr);
+ flst_add_last(iblock, FSEG_FULL + ioffset, xdes, xoffset, mtr);
+ mtr->write<4>(*iblock, seg_inode + FSEG_NOT_FULL_N_USED,
+ not_full_n_used - FSP_EXTENT_SIZE);
+ }
}
/** Get a pointer to the extent descriptor of a page.
@param[in,out] sp_header tablespace header page, x-latched
@param[in] space tablespace
@param[in] offset page offset
+@param[out] desc_block descriptor block
@param[in,out] mtr mini-transaction
@param[in] init_space whether the tablespace is being initialized
-@param[out] desc_block descriptor block, or NULL if it is
-the same as the tablespace header
@return pointer to the extent descriptor, NULL if the page does not
exist in the space or if the offset exceeds free limit */
UNIV_INLINE MY_ATTRIBUTE((warn_unused_result))
xdes_t*
xdes_get_descriptor_with_space_hdr(
- fsp_header_t* sp_header,
+ buf_block_t* header,
const fil_space_t* space,
page_no_t offset,
+ buf_block_t** desc_block,
mtr_t* mtr,
- bool init_space = false,
- buf_block_t** desc_block = NULL)
+ bool init_space = false)
{
ulint limit;
ulint size;
ulint descr_page_no;
- page_t* descr_page;
ut_ad(mtr_memo_contains(mtr, &space->latch, MTR_MEMO_X_LOCK));
- ut_ad(mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_SX_FIX));
- ut_ad(page_offset(sp_header) == FSP_HEADER_OFFSET);
+ ut_ad(mtr_memo_contains(mtr, header, MTR_MEMO_PAGE_SX_FIX));
/* Read free limit and space size */
- limit = mach_read_from_4(sp_header + FSP_FREE_LIMIT);
- size = mach_read_from_4(sp_header + FSP_SIZE);
+ limit = mach_read_from_4(FSP_HEADER_OFFSET + FSP_FREE_LIMIT
+ + header->frame);
+ size = mach_read_from_4(FSP_HEADER_OFFSET + FSP_SIZE + header->frame);
ut_ad(limit == space->free_limit
|| (space->free_limit == 0
&& (init_space
@@ -325,29 +351,23 @@ xdes_get_descriptor_with_space_hdr(
descr_page_no = xdes_calc_descriptor_page(zip_size, offset);
- buf_block_t* block;
-
- if (descr_page_no == 0) {
- /* It is on the space header page */
+ buf_block_t* block = header;
- descr_page = page_align(sp_header);
- block = NULL;
- } else {
+ if (descr_page_no) {
block = buf_page_get(
page_id_t(space->id, descr_page_no), zip_size,
RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
-
- descr_page = buf_block_get_frame(block);
}
if (desc_block != NULL) {
*desc_block = block;
}
- return(descr_page + XDES_ARR_OFFSET
- + XDES_SIZE * xdes_calc_descriptor_index(zip_size, offset));
+ return XDES_ARR_OFFSET + XDES_SIZE
+ * xdes_calc_descriptor_index(zip_size, offset)
+ + block->frame;
}
/** Get the extent descriptor of a page.
@@ -359,24 +379,16 @@ defined, as they are uninitialized above the free limit.
@param[in] space tablespace
@param[in] offset page offset; if equal to the free limit, we
try to add new extents to the space free list
+@param[out] xdes extent descriptor page
@param[in,out] mtr mini-transaction
@return the extent descriptor */
-MY_ATTRIBUTE((warn_unused_result))
-static
-xdes_t*
-xdes_get_descriptor(const fil_space_t* space, page_no_t offset, mtr_t* mtr)
+static xdes_t* xdes_get_descriptor(const fil_space_t *space, page_no_t offset,
+ buf_block_t **xdes, mtr_t *mtr)
{
- buf_block_t* block;
- fsp_header_t* sp_header;
-
- block = buf_page_get(page_id_t(space->id, 0), space->zip_size(),
- RW_SX_LATCH, mtr);
-
- buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
-
- sp_header = FSP_HEADER_OFFSET + buf_block_get_frame(block);
- return(xdes_get_descriptor_with_space_hdr(
- sp_header, space, offset, mtr));
+ buf_block_t *block= buf_page_get(page_id_t(space->id, 0), space->zip_size(),
+ RW_SX_LATCH, mtr);
+ buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
+ return xdes_get_descriptor_with_space_hdr(block, space, offset, xdes, mtr);
}
/** Get the extent descriptor of a page.
@@ -429,6 +441,7 @@ extent descriptor resides is x-locked.
@param[in] space tablespace
@param[in] lst_node file address of the list node
contained in the descriptor
+@param[out] block extent descriptor block
@param[in,out] mtr mini-transaction
@return pointer to the extent descriptor */
MY_ATTRIBUTE((nonnull, warn_unused_result))
@@ -437,11 +450,12 @@ xdes_t*
xdes_lst_get_descriptor(
const fil_space_t* space,
fil_addr_t lst_node,
+ buf_block_t** block,
mtr_t* mtr)
{
ut_ad(mtr_memo_contains(mtr, &space->latch, MTR_MEMO_X_LOCK));
return fut_get_ptr(space->id, space->zip_size(),
- lst_node, RW_SX_LATCH, mtr)
+ lst_node, RW_SX_LATCH, mtr, block)
- XDES_FLST_NODE;
}
@@ -559,20 +573,20 @@ void fsp_header_init(fil_space_t* space, ulint size, mtr_t* mtr)
fsp_init_file_page(space, block, mtr);
- mlog_write_ulint(block->frame + FIL_PAGE_TYPE, FIL_PAGE_TYPE_FSP_HDR,
- MLOG_2BYTES, mtr);
+ mtr->write<2>(*block, block->frame + FIL_PAGE_TYPE,
+ FIL_PAGE_TYPE_FSP_HDR);
- mlog_write_ulint(FSP_HEADER_OFFSET + FSP_SPACE_ID + block->frame,
- space->id, MLOG_4BYTES, mtr);
+ mtr->write<4,mtr_t::OPT>(*block, FSP_HEADER_OFFSET + FSP_SPACE_ID
+ + block->frame, space->id);
ut_ad(0 == mach_read_from_4(FSP_HEADER_OFFSET + FSP_NOT_USED
+ block->frame));
- mlog_write_ulint(FSP_HEADER_OFFSET + FSP_SIZE + block->frame, size,
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*block, FSP_HEADER_OFFSET + FSP_SIZE + block->frame,
+ size);
ut_ad(0 == mach_read_from_4(FSP_HEADER_OFFSET + FSP_FREE_LIMIT
+ block->frame));
- mlog_write_ulint(FSP_HEADER_OFFSET + FSP_SPACE_FLAGS + block->frame,
- space->flags & ~FSP_FLAGS_MEM_MASK,
- MLOG_4BYTES, mtr);
+ mtr->write<4,mtr_t::OPT>(*block, FSP_HEADER_OFFSET + FSP_SPACE_FLAGS
+ + block->frame,
+ space->flags & ~FSP_FLAGS_MEM_MASK);
ut_ad(0 == mach_read_from_4(FSP_HEADER_OFFSET + FSP_FRAG_N_USED
+ block->frame));
@@ -582,10 +596,11 @@ void fsp_header_init(fil_space_t* space, ulint size, mtr_t* mtr)
flst_init(block, FSP_HEADER_OFFSET + FSP_SEG_INODES_FULL, mtr);
flst_init(block, FSP_HEADER_OFFSET + FSP_SEG_INODES_FREE, mtr);
- mlog_write_ull(FSP_HEADER_OFFSET + FSP_SEG_ID + block->frame, 1, mtr);
+ mtr->write<8>(*block, FSP_HEADER_OFFSET + FSP_SEG_ID + block->frame,
+ 1U);
fsp_fill_free_list(!is_system_tablespace(space->id),
- space, FSP_HEADER_OFFSET + block->frame, mtr);
+ space, block, mtr);
/* Write encryption metadata to page 0 if tablespace is
encrypted or encryption is disabled by table option. */
@@ -635,7 +650,7 @@ bool
fsp_try_extend_data_file_with_pages(
fil_space_t* space,
ulint page_no,
- fsp_header_t* header,
+ buf_block_t* header,
mtr_t* mtr)
{
bool success;
@@ -644,14 +659,15 @@ fsp_try_extend_data_file_with_pages(
ut_a(!is_system_tablespace(space->id));
ut_d(space->modify_check(*mtr));
- size = mach_read_from_4(header + FSP_SIZE);
+ size = mach_read_from_4(FSP_HEADER_OFFSET + FSP_SIZE + header->frame);
ut_ad(size == space->size_in_header);
ut_a(page_no >= size);
success = fil_space_extend(space, page_no + 1);
/* The size may be less than we wanted if we ran out of disk space. */
- mlog_write_ulint(header + FSP_SIZE, space->size, MLOG_4BYTES, mtr);
+ mtr->write<4>(*header, FSP_HEADER_OFFSET + FSP_SIZE + header->frame,
+ space->size);
space->size_in_header = space->size;
return(success);
@@ -704,7 +720,7 @@ static ulint fsp_get_pages_to_extend_ibd(ulint physical_size, ulint size)
ATTRIBUTE_COLD __attribute__((nonnull))
static
ulint
-fsp_try_extend_data_file(fil_space_t* space, fsp_header_t* header, mtr_t* mtr)
+fsp_try_extend_data_file(fil_space_t *space, buf_block_t *header, mtr_t *mtr)
{
ulint size; /* current number of pages in the datafile */
ulint size_increase; /* number of pages to extend this file */
@@ -744,7 +760,7 @@ fsp_try_extend_data_file(fil_space_t* space, fsp_header_t* header, mtr_t* mtr)
return(0);
}
- size = mach_read_from_4(header + FSP_SIZE);
+ size = mach_read_from_4(FSP_HEADER_OFFSET + FSP_SIZE + header->frame);
ut_ad(size == space->size_in_header);
const ulint ps = space->physical_size();
@@ -784,8 +800,8 @@ fsp_try_extend_data_file(fil_space_t* space, fsp_header_t* header, mtr_t* mtr)
space->size_in_header = ut_2pow_round(space->size, (1024 * 1024) / ps);
- mlog_write_ulint(
- header + FSP_SIZE, space->size_in_header, MLOG_4BYTES, mtr);
+ mtr->write<4>(*header, FSP_HEADER_OFFSET + FSP_SIZE + header->frame,
+ space->size_in_header);
return(size_increase);
}
@@ -803,7 +819,7 @@ void fil_block_reset_type(const buf_block_t& block, ulint type, mtr_t* mtr)
ib::info()
<< "Resetting invalid page " << block.page.id << " type "
<< fil_page_get_type(block.frame) << " to " << type << ".";
- mlog_write_ulint(block.frame + FIL_PAGE_TYPE, type, MLOG_2BYTES, mtr);
+ mtr->write<2>(block, block.frame + FIL_PAGE_TYPE, type);
}
/** Put new extents to the free list if there are free extents above the free
@@ -820,22 +836,21 @@ void
fsp_fill_free_list(
bool init_space,
fil_space_t* space,
- fsp_header_t* header,
+ buf_block_t* header,
mtr_t* mtr)
{
ulint limit;
ulint size;
xdes_t* descr;
ulint count = 0;
- ulint frag_n_used;
ulint i;
- ut_ad(page_offset(header) == FSP_HEADER_OFFSET);
ut_d(space->modify_check(*mtr));
/* Check if we can fill free list from above the free list limit */
- size = mach_read_from_4(header + FSP_SIZE);
- limit = mach_read_from_4(header + FSP_FREE_LIMIT);
+ size = mach_read_from_4(FSP_HEADER_OFFSET + FSP_SIZE + header->frame);
+ limit = mach_read_from_4(FSP_HEADER_OFFSET + FSP_FREE_LIMIT
+ + header->frame);
ut_ad(size == space->size_in_header);
ut_ad(limit == space->free_limit);
@@ -868,8 +883,8 @@ fsp_fill_free_list(
== ut_2pow_remainder(i, ulint(space->physical_size()));
space->free_limit = i + FSP_EXTENT_SIZE;
- mlog_write_ulint(header + FSP_FREE_LIMIT, i + FSP_EXTENT_SIZE,
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*header, FSP_HEADER_OFFSET + FSP_FREE_LIMIT
+ + header->frame, i + FSP_EXTENT_SIZE);
if (init_xdes) {
@@ -891,10 +906,9 @@ fsp_fill_free_list(
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
fsp_init_file_page(space, block, mtr);
- mlog_write_ulint(buf_block_get_frame(block)
- + FIL_PAGE_TYPE,
- FIL_PAGE_TYPE_XDES,
- MLOG_2BYTES, mtr);
+ mtr->write<2>(*block,
+ FIL_PAGE_TYPE + block->frame,
+ FIL_PAGE_TYPE_XDES);
}
/* Initialize the ibuf bitmap page in a separate
@@ -905,7 +919,7 @@ fsp_fill_free_list(
if (space->purpose != FIL_TYPE_TEMPORARY) {
mtr_t ibuf_mtr;
- mtr_start(&ibuf_mtr);
+ ibuf_mtr.start();
ibuf_mtr.set_named_space(space);
const page_id_t page_id(
@@ -922,21 +936,22 @@ fsp_fill_free_list(
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
fsp_init_file_page(space, block, &ibuf_mtr);
- mlog_write_ulint(block->frame + FIL_PAGE_TYPE,
- FIL_PAGE_IBUF_BITMAP,
- MLOG_2BYTES, &ibuf_mtr);
- mtr_commit(&ibuf_mtr);
+ ibuf_mtr.write<2>(*block,
+ block->frame + FIL_PAGE_TYPE,
+ FIL_PAGE_IBUF_BITMAP);
+ ibuf_mtr.commit();
}
}
- buf_block_t* desc_block = NULL;
+ buf_block_t* xdes;
descr = xdes_get_descriptor_with_space_hdr(
- header, space, i, mtr, init_space, &desc_block);
- if (desc_block && !space->full_crc32()) {
- fil_block_check_type(
- *desc_block, FIL_PAGE_TYPE_XDES, mtr);
+ header, space, i, &xdes, mtr, init_space);
+ if (xdes != header && !space->full_crc32()) {
+ fil_block_check_type(*xdes, FIL_PAGE_TYPE_XDES, mtr);
}
- xdes_init(descr, mtr);
+ xdes_init(*xdes, descr, mtr);
+ const uint16_t xoffset= XDES_FLST_NODE
+ + uint16_t(descr - xdes->frame);
if (UNIV_UNLIKELY(init_xdes)) {
@@ -944,20 +959,21 @@ fsp_fill_free_list(
and the second is an ibuf bitmap page: mark them
used */
- xdes_set_free<false>(descr, 0, mtr);
- xdes_set_free<false>(descr, FSP_IBUF_BITMAP_OFFSET,
- mtr);
- xdes_set_state(descr, XDES_FREE_FRAG, mtr);
-
- flst_add_last(header + FSP_FREE_FRAG,
- descr + XDES_FLST_NODE, mtr);
- frag_n_used = mach_read_from_4(
- header + FSP_FRAG_N_USED);
- mlog_write_ulint(header + FSP_FRAG_N_USED,
- frag_n_used + 2, MLOG_4BYTES, mtr);
+ xdes_set_free<false>(*xdes, descr, 0, mtr);
+ xdes_set_free<false>(*xdes, descr,
+ FSP_IBUF_BITMAP_OFFSET, mtr);
+ xdes_set_state(*xdes, descr, XDES_FREE_FRAG, mtr);
+
+ flst_add_last(header,
+ FSP_HEADER_OFFSET + FSP_FREE_FRAG,
+ xdes, xoffset, mtr);
+ byte* n_used = FSP_HEADER_OFFSET + FSP_FRAG_N_USED
+ + header->frame;
+ mtr->write<4>(*header, n_used,
+ 2U + mach_read_from_4(n_used));
} else {
- flst_add_last(header + FSP_FREE,
- descr + XDES_FLST_NODE, mtr);
+ flst_add_last(header, FSP_HEADER_OFFSET + FSP_FREE,
+ xdes, xoffset, mtr);
count++;
}
@@ -971,6 +987,7 @@ fsp_fill_free_list(
@param[in,out] space tablespace
@param[in] hint hint of which extent would be desirable: any
page offset in the extent goes; the hint must not be > FSP_FREE_LIMIT
+@param[out] xdes extent descriptor page
@param[in,out] mtr mini-transaction
@return extent descriptor, NULL if cannot be allocated */
static
@@ -978,19 +995,19 @@ xdes_t*
fsp_alloc_free_extent(
fil_space_t* space,
ulint hint,
+ buf_block_t** xdes,
mtr_t* mtr)
{
- fsp_header_t* header;
fil_addr_t first;
xdes_t* descr;
buf_block_t* desc_block = NULL;
- header = fsp_get_space_header(space, mtr);
+ buf_block_t* header = fsp_get_header(space, mtr);
descr = xdes_get_descriptor_with_space_hdr(
- header, space, hint, mtr, false, &desc_block);
+ header, space, hint, &desc_block, mtr);
- if (desc_block && !space->full_crc32()) {
+ if (desc_block != header && !space->full_crc32()) {
fil_block_check_type(*desc_block, FIL_PAGE_TYPE_XDES, mtr);
}
@@ -998,12 +1015,14 @@ fsp_alloc_free_extent(
/* Ok, we can take this extent */
} else {
/* Take the first extent in the free list */
- first = flst_get_first(header + FSP_FREE);
+ first = flst_get_first(FSP_HEADER_OFFSET + FSP_FREE
+ + header->frame);
if (fil_addr_is_null(first)) {
fsp_fill_free_list(false, space, header, mtr);
- first = flst_get_first(header + FSP_FREE);
+ first = flst_get_first(FSP_HEADER_OFFSET + FSP_FREE
+ + header->frame);
}
if (fil_addr_is_null(first)) {
@@ -1011,49 +1030,52 @@ fsp_alloc_free_extent(
return(NULL); /* No free extents left */
}
- descr = xdes_lst_get_descriptor(space, first, mtr);
+ descr = xdes_lst_get_descriptor(space, first, &desc_block,
+ mtr);
}
- flst_remove(header + FSP_FREE, descr + XDES_FLST_NODE, mtr);
+ flst_remove(header, FSP_HEADER_OFFSET + FSP_FREE,
+ desc_block, uint16_t(descr - desc_block->frame)
+ + XDES_FLST_NODE, mtr);
space->free_len--;
+ *xdes = desc_block;
return(descr);
}
-/**********************************************************************//**
-Allocates a single free page from a space. */
-static MY_ATTRIBUTE((nonnull))
-void
-fsp_alloc_from_free_frag(
-/*=====================*/
- fsp_header_t* header, /*!< in/out: tablespace header */
- xdes_t* descr, /*!< in/out: extent descriptor */
- ulint bit, /*!< in: slot to allocate in the extent */
- mtr_t* mtr) /*!< in/out: mini-transaction */
+/** Allocate a single free page.
+@param[in,out] header tablespace header
+@param[in,out] xdes extent descriptor page
+@param[in,out] descr extent descriptor
+@param[in] bit slot to allocate in the extent
+@param[in,out] mtr mini-transaction */
+static void
+fsp_alloc_from_free_frag(buf_block_t *header, buf_block_t *xdes, xdes_t *descr,
+ ulint bit, mtr_t *mtr)
{
- ulint frag_n_used;
-
ut_ad(xdes_get_state(descr, mtr) == XDES_FREE_FRAG);
ut_a(xdes_is_free(descr, bit));
- xdes_set_free<false>(descr, bit, mtr);
+ xdes_set_free<false>(*xdes, descr, bit, mtr);
/* Update the FRAG_N_USED field */
- frag_n_used = mach_read_from_4(header + FSP_FRAG_N_USED);
- frag_n_used++;
- mlog_write_ulint(header + FSP_FRAG_N_USED, frag_n_used, MLOG_4BYTES,
- mtr);
+ byte* n_used_p = FSP_HEADER_OFFSET + FSP_FRAG_N_USED + header->frame;
+
+ uint32_t n_used = mach_read_from_4(n_used_p) + 1;
+
if (xdes_is_full(descr)) {
/* The fragment is full: move it to another list */
- flst_remove(header + FSP_FREE_FRAG, descr + XDES_FLST_NODE,
- mtr);
- xdes_set_state(descr, XDES_FULL_FRAG, mtr);
+ const uint16_t xoffset= XDES_FLST_NODE
+ + uint16_t(descr - xdes->frame);
+ flst_remove(header, FSP_HEADER_OFFSET + FSP_FREE_FRAG,
+ xdes, xoffset, mtr);
+ xdes_set_state(*xdes, descr, XDES_FULL_FRAG, mtr);
- flst_add_last(header + FSP_FULL_FRAG, descr + XDES_FLST_NODE,
- mtr);
- mlog_write_ulint(header + FSP_FRAG_N_USED,
- frag_n_used - FSP_EXTENT_SIZE, MLOG_4BYTES,
- mtr);
+ flst_add_last(header, FSP_HEADER_OFFSET + FSP_FULL_FRAG,
+ xdes, xoffset, mtr);
+ n_used -= FSP_EXTENT_SIZE;
}
+
+ mtr->write<4>(*header, n_used_p, n_used);
}
/** Gets a buffer block for an allocated page.
@@ -1132,23 +1154,25 @@ fsp_alloc_free_page(
mtr_t* mtr,
mtr_t* init_mtr)
{
- fsp_header_t* header;
fil_addr_t first;
xdes_t* descr;
ulint free;
const ulint space_id = space->id;
ut_d(space->modify_check(*mtr));
- header = fsp_get_space_header(space, mtr);
+ buf_block_t* block = fsp_get_header(space, mtr);
+ buf_block_t *xdes;
/* Get the hinted descriptor */
- descr = xdes_get_descriptor_with_space_hdr(header, space, hint, mtr);
+ descr = xdes_get_descriptor_with_space_hdr(block, space, hint, &xdes,
+ mtr);
if (descr && (xdes_get_state(descr, mtr) == XDES_FREE_FRAG)) {
/* Ok, we can take this extent */
} else {
/* Else take the first extent in free_frag list */
- first = flst_get_first(header + FSP_FREE_FRAG);
+ first = flst_get_first(FSP_HEADER_OFFSET + FSP_FREE_FRAG
+ + block->frame);
if (fil_addr_is_null(first)) {
/* There are no partially full fragments: allocate
@@ -1158,7 +1182,7 @@ fsp_alloc_free_page(
FREE_FRAG list. But we will allocate our page from the
the free extent anyway. */
- descr = fsp_alloc_free_extent(space, hint, mtr);
+ descr = fsp_alloc_free_extent(space, hint, &xdes, mtr);
if (descr == NULL) {
/* No free space left */
@@ -1166,11 +1190,13 @@ fsp_alloc_free_page(
return(NULL);
}
- xdes_set_state(descr, XDES_FREE_FRAG, mtr);
- flst_add_last(header + FSP_FREE_FRAG,
- descr + XDES_FLST_NODE, mtr);
+ xdes_set_state(*xdes, descr, XDES_FREE_FRAG, mtr);
+ flst_add_last(block, FSP_HEADER_OFFSET + FSP_FREE_FRAG,
+ xdes, XDES_FLST_NODE
+ + uint16_t(descr - xdes->frame), mtr);
} else {
- descr = xdes_lst_get_descriptor(space, first, mtr);
+ descr = xdes_lst_get_descriptor(space, first, &xdes,
+ mtr);
}
/* Reset the hint */
@@ -1191,7 +1217,8 @@ fsp_alloc_free_page(
page_no_t page_no = xdes_get_offset(descr) + free;
- page_no_t space_size = mach_read_from_4(header + FSP_SIZE);
+ page_no_t space_size = mach_read_from_4(FSP_HEADER_OFFSET + FSP_SIZE
+ + block->frame);
ut_ad(space_size == space->size_in_header
|| (space_id == TRX_SYS_SPACE
&& srv_startup_is_before_trx_rollback_phase));
@@ -1210,13 +1237,13 @@ fsp_alloc_free_page(
}
if (!fsp_try_extend_data_file_with_pages(space, page_no,
- header, mtr)) {
+ block, mtr)) {
/* No disk space left */
return(NULL);
}
}
- fsp_alloc_from_free_frag(header, descr, free, mtr);
+ fsp_alloc_from_free_frag(block, xdes, descr, free, mtr);
return fsp_page_create(space, page_no, rw_latch, mtr, init_mtr);
}
@@ -1229,7 +1256,6 @@ The page is marked as free and clean.
static void fsp_free_page(fil_space_t* space, page_no_t offset,
bool log, mtr_t* mtr)
{
- fsp_header_t* header;
xdes_t* descr;
ulint state;
ulint frag_n_used;
@@ -1239,10 +1265,11 @@ static void fsp_free_page(fil_space_t* space, page_no_t offset,
/* fprintf(stderr, "Freeing page %lu in space %lu\n", page, space); */
- header = fsp_get_space_header(space, mtr);
+ buf_block_t* header = fsp_get_header(space, mtr);
+ buf_block_t* xdes;
- descr = xdes_get_descriptor_with_space_hdr(
- header, space, offset, mtr);
+ descr = xdes_get_descriptor_with_space_hdr(header, space, offset,
+ &xdes, mtr);
state = xdes_get_state(descr, mtr);
@@ -1292,56 +1319,57 @@ static void fsp_free_page(fil_space_t* space, page_no_t offset,
const ulint bit = offset % FSP_EXTENT_SIZE;
- xdes_set_free<true>(descr, bit, mtr);
+ xdes_set_free<true>(*xdes, descr, bit, mtr);
+
+ frag_n_used = mach_read_from_4(FSP_HEADER_OFFSET + FSP_FRAG_N_USED
+ + header->frame);
- frag_n_used = mach_read_from_4(header + FSP_FRAG_N_USED);
+ const uint16_t xoffset= XDES_FLST_NODE + uint16_t(descr - xdes->frame);
if (state == XDES_FULL_FRAG) {
/* The fragment was full: move it to another list */
- flst_remove(header + FSP_FULL_FRAG, descr + XDES_FLST_NODE,
- mtr);
- xdes_set_state(descr, XDES_FREE_FRAG, mtr);
- flst_add_last(header + FSP_FREE_FRAG, descr + XDES_FLST_NODE,
- mtr);
- mlog_write_ulint(header + FSP_FRAG_N_USED,
- frag_n_used + FSP_EXTENT_SIZE - 1,
- MLOG_4BYTES, mtr);
+ flst_remove(header, FSP_HEADER_OFFSET + FSP_FULL_FRAG,
+ xdes, xoffset, mtr);
+ xdes_set_state(*xdes, descr, XDES_FREE_FRAG, mtr);
+ flst_add_last(header, FSP_HEADER_OFFSET + FSP_FREE_FRAG,
+ xdes, xoffset, mtr);
+ mtr->write<4>(*header, FSP_HEADER_OFFSET + FSP_FRAG_N_USED
+ + header->frame,
+ frag_n_used + FSP_EXTENT_SIZE - 1);
} else {
ut_a(frag_n_used > 0);
- mlog_write_ulint(header + FSP_FRAG_N_USED, frag_n_used - 1,
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*header, FSP_HEADER_OFFSET + FSP_FRAG_N_USED
+ + header->frame, frag_n_used - 1);
}
if (!xdes_get_n_used(descr)) {
/* The extent has become free: move it to another list */
- flst_remove(header + FSP_FREE_FRAG, descr + XDES_FLST_NODE,
- mtr);
+ flst_remove(header, FSP_HEADER_OFFSET + FSP_FREE_FRAG,
+ xdes, xoffset, mtr);
fsp_free_extent(space, offset, mtr);
}
}
/** Return an extent to the free list of a space.
-@param[in,out] space tablespace
-@param[in] offset page number in the extent
-@param[in,out] mtr mini-transaction */
+@param[in,out] space tablespace
+@param[in] offset page number in the extent
+@param[in,out] mtr mini-transaction */
static void fsp_free_extent(fil_space_t* space, page_no_t offset, mtr_t* mtr)
{
- fsp_header_t* header;
- xdes_t* descr;
-
- ut_ad(mtr_memo_contains(mtr, &space->latch, MTR_MEMO_X_LOCK));
-
- header = fsp_get_space_header(space, mtr);
+ ut_ad(mtr_memo_contains(mtr, &space->latch, MTR_MEMO_X_LOCK));
- descr = xdes_get_descriptor_with_space_hdr(
- header, space, offset, mtr);
+ buf_block_t *block= fsp_get_header(space, mtr);
+ buf_block_t *xdes;
- ut_a(xdes_get_state(descr, mtr) != XDES_FREE);
+ xdes_t* descr= xdes_get_descriptor_with_space_hdr(block, space, offset,
+ &xdes, mtr);
+ ut_a(xdes_get_state(descr, mtr) != XDES_FREE);
- xdes_init(descr, mtr);
+ xdes_init(*xdes, descr, mtr);
- flst_add_last(header + FSP_FREE, descr + XDES_FLST_NODE, mtr);
- space->free_len++;
+ flst_add_last(block, FSP_HEADER_OFFSET + FSP_FREE,
+ xdes, XDES_FLST_NODE + uint16_t(descr - xdes->frame), mtr);
+ space->free_len++;
}
/** @return Number of segment inodes which fit on a single page */
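
Throughout these hunks the file-list calls receive a buf_block_t plus a 16-bit byte offset, computed as uint16_t(descr - xdes->frame) or, later, uint16_t(inode - iblock->frame), instead of a raw pointer into the page. The standalone sketch below is not InnoDB code; toy_block, its 16 KiB frame and the constant 8 standing in for a node offset such as XDES_FLST_NODE are assumptions of the sketch. It shows the offset computation with a bounds check.

#include <cassert>
#include <cstdint>

struct toy_block
{
  unsigned char frame[16384];   /* a 16 KiB page frame, size assumed by the sketch */
};

/* Compute the 16-bit offset of a field pointer within its page frame. */
static uint16_t offset_in_page(const toy_block &b, const unsigned char *ptr)
{
  assert(ptr >= b.frame && ptr < b.frame + sizeof b.frame);
  return static_cast<uint16_t>(ptr - b.frame);
}

int main()
{
  static toy_block block;
  /* pretend this points at an extent descriptor within the page */
  const unsigned char *descr = block.frame + 150;
  /* 8 stands in for an on-page node offset such as XDES_FLST_NODE */
  const uint16_t xoffset = uint16_t(8 + offset_in_page(block, descr));
  assert(xoffset == 158);
  (void)xoffset;
  return 0;
}
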
@@ -1407,79 +1435,63 @@ fsp_seg_inode_page_find_free(const page_t* page, ulint i, ulint physical_size)
}
/** Allocate a file segment inode page.
-@param[in,out] space tablespace
-@param[in,out] space_header tablespace header
-@param[in,out] mtr mini-transaction
+@param[in,out] space tablespace
+@param[in,out] header tablespace header
+@param[in,out] mtr mini-transaction
@return whether the allocation succeeded */
MY_ATTRIBUTE((nonnull, warn_unused_result))
static
bool
-fsp_alloc_seg_inode_page(
- fil_space_t* space,
- fsp_header_t* space_header,
- mtr_t* mtr)
+fsp_alloc_seg_inode_page(fil_space_t *space, buf_block_t *header, mtr_t *mtr)
{
- buf_block_t* block;
-
- ut_ad(page_offset(space_header) == FSP_HEADER_OFFSET);
- ut_ad(page_get_space_id(page_align(space_header)) == space->id);
-
- block = fsp_alloc_free_page(space, 0, RW_SX_LATCH, mtr, mtr);
-
- if (block == NULL) {
+ ut_ad(header->page.id.space() == space->id);
+ buf_block_t *block= fsp_alloc_free_page(space, 0, RW_SX_LATCH, mtr, mtr);
- return(false);
- }
+ if (!block)
+ return false;
- buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
- ut_ad(rw_lock_get_sx_lock_count(&block->lock) == 1);
+ buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
+ ut_ad(rw_lock_get_sx_lock_count(&block->lock) == 1);
- mlog_write_ulint(block->frame + FIL_PAGE_TYPE, FIL_PAGE_INODE,
- MLOG_2BYTES, mtr);
+ mtr->write<2>(*block, block->frame + FIL_PAGE_TYPE, FIL_PAGE_INODE);
#ifdef UNIV_DEBUG
- const byte* inode = FSEG_ID + FSEG_ARR_OFFSET + block->frame;
- for (ulint i = FSP_SEG_INODES_PER_PAGE(space->physical_size()); i--;
- inode += FSEG_INODE_SIZE) {
- ut_ad(!mach_read_from_8(inode));
- }
+ const byte *inode= FSEG_ID + FSEG_ARR_OFFSET + block->frame;
+ for (ulint i= FSP_SEG_INODES_PER_PAGE(space->physical_size()); i--;
+ inode += FSEG_INODE_SIZE)
+ ut_ad(!mach_read_from_8(inode));
#endif
- flst_add_last(
- space_header + FSP_SEG_INODES_FREE,
- block->frame + FSEG_INODE_PAGE_NODE, mtr);
-
- return(true);
+ flst_add_last(header, FSP_HEADER_OFFSET + FSP_SEG_INODES_FREE,
+ block, FSEG_INODE_PAGE_NODE, mtr);
+ return true;
}
/** Allocate a file segment inode.
-@param[in,out] space tablespace
-@param[in,out] space_header tablespace header
-@param[in,out] mtr mini-transaction
+@param[in,out] space tablespace
+@param[in,out] header tablespace header
+@param[out] iblock segment inode page
+@param[in,out] mtr mini-transaction
@return segment inode
@retval NULL if not enough space */
MY_ATTRIBUTE((nonnull, warn_unused_result))
-static
-fseg_inode_t*
-fsp_alloc_seg_inode(
- fil_space_t* space,
- fsp_header_t* space_header,
- mtr_t* mtr)
+static fseg_inode_t*
+fsp_alloc_seg_inode(fil_space_t *space, buf_block_t *header,
+ buf_block_t **iblock, mtr_t *mtr)
{
buf_block_t* block;
- page_t* page;
fseg_inode_t* inode;
- ut_ad(page_offset(space_header) == FSP_HEADER_OFFSET);
-
/* Allocate a new segment inode page if needed. */
- if (flst_get_len(space_header + FSP_SEG_INODES_FREE) == 0
- && !fsp_alloc_seg_inode_page(space, space_header, mtr)) {
+ if (!flst_get_len(FSP_HEADER_OFFSET + FSP_SEG_INODES_FREE
+ + header->frame)
+ && !fsp_alloc_seg_inode_page(space, header, mtr)) {
return(NULL);
}
const page_id_t page_id(
space->id,
- flst_get_first(space_header + FSP_SEG_INODES_FREE).page);
+ flst_get_first(FSP_HEADER_OFFSET + FSP_SEG_INODES_FREE
+ + header->frame).page);
block = buf_page_get(page_id, space->zip_size(), RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
@@ -1487,79 +1499,68 @@ fsp_alloc_seg_inode(
fil_block_check_type(*block, FIL_PAGE_INODE, mtr);
}
- page = buf_block_get_frame(block);
-
const ulint physical_size = space->physical_size();
- ulint n = fsp_seg_inode_page_find_free(page, 0, physical_size);
+ ulint n = fsp_seg_inode_page_find_free(block->frame, 0, physical_size);
ut_a(n < FSP_SEG_INODES_PER_PAGE(physical_size));
- inode = fsp_seg_inode_page_get_nth_inode(page, n);
+ inode = fsp_seg_inode_page_get_nth_inode(block->frame, n);
- if (ULINT_UNDEFINED == fsp_seg_inode_page_find_free(page, n + 1,
+ if (ULINT_UNDEFINED == fsp_seg_inode_page_find_free(block->frame,
+ n + 1,
physical_size)) {
/* There are no other unused headers left on the page: move it
to another list */
-
- flst_remove(space_header + FSP_SEG_INODES_FREE,
- page + FSEG_INODE_PAGE_NODE, mtr);
-
- flst_add_last(space_header + FSP_SEG_INODES_FULL,
- page + FSEG_INODE_PAGE_NODE, mtr);
+ flst_remove(header, FSP_HEADER_OFFSET + FSP_SEG_INODES_FREE,
+ block, FSEG_INODE_PAGE_NODE, mtr);
+ flst_add_last(header, FSP_HEADER_OFFSET + FSP_SEG_INODES_FULL,
+ block, FSEG_INODE_PAGE_NODE, mtr);
}
ut_ad(!mach_read_from_8(inode + FSEG_ID)
|| mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
+ *iblock = block;
return(inode);
}
/** Frees a file segment inode.
@param[in,out] space tablespace
@param[in,out] inode segment inode
+@param[in,out] iblock segment inode page
@param[in,out] mtr mini-transaction */
static void fsp_free_seg_inode(
fil_space_t* space,
fseg_inode_t* inode,
+ buf_block_t* iblock,
mtr_t* mtr)
{
- page_t* page;
- fsp_header_t* space_header;
-
ut_d(space->modify_check(*mtr));
- page = page_align(inode);
-
- space_header = fsp_get_space_header(space, mtr);
+ buf_block_t* header = fsp_get_header(space, mtr);
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
const ulint physical_size = space->physical_size();
if (ULINT_UNDEFINED
- == fsp_seg_inode_page_find_free(page, 0, physical_size)) {
-
+ == fsp_seg_inode_page_find_free(iblock->frame, 0, physical_size)) {
/* Move the page to another list */
-
- flst_remove(space_header + FSP_SEG_INODES_FULL,
- page + FSEG_INODE_PAGE_NODE, mtr);
-
- flst_add_last(space_header + FSP_SEG_INODES_FREE,
- page + FSEG_INODE_PAGE_NODE, mtr);
+ flst_remove(header, FSP_HEADER_OFFSET + FSP_SEG_INODES_FULL,
+ iblock, FSEG_INODE_PAGE_NODE, mtr);
+ flst_add_last(header, FSP_HEADER_OFFSET + FSP_SEG_INODES_FREE,
+ iblock, FSEG_INODE_PAGE_NODE, mtr);
}
- mlog_write_ull(inode + FSEG_ID, 0, mtr);
- mlog_write_ulint(inode + FSEG_MAGIC_N, 0xfa051ce3, MLOG_4BYTES, mtr);
+ mtr->write<8>(*iblock, inode + FSEG_ID, 0U);
+ mtr->write<4>(*iblock, inode + FSEG_MAGIC_N, 0xfa051ce3);
if (ULINT_UNDEFINED
- == fsp_seg_inode_page_find_used(page, physical_size)) {
-
+ == fsp_seg_inode_page_find_used(iblock->frame, physical_size)) {
/* There are no other used headers left on the page: free it */
-
- flst_remove(space_header + FSP_SEG_INODES_FREE,
- page + FSEG_INODE_PAGE_NODE, mtr);
-
- fsp_free_page(space, page_get_page_no(page), true, mtr);
+ flst_remove(header, FSP_HEADER_OFFSET + FSP_SEG_INODES_FREE,
+ iblock, FSEG_INODE_PAGE_NODE, mtr);
+ fsp_free_page(space, iblock->page.id.page_no(), true, mtr);
}
}
@@ -1642,24 +1643,21 @@ fseg_get_nth_frag_page_no(
+ n * FSEG_FRAG_SLOT_SIZE));
}
-/**********************************************************************//**
-Sets the page number in the nth fragment page slot. */
-UNIV_INLINE
-void
-fseg_set_nth_frag_page_no(
-/*======================*/
- fseg_inode_t* inode, /*!< in: segment inode */
- ulint n, /*!< in: slot index */
- ulint page_no,/*!< in: page number to set */
- mtr_t* mtr) /*!< in/out: mini-transaction */
+/** Set the page number in the nth fragment page slot.
+@param[in,out] inode segment inode
+@param[in,out] iblock segment inode page
+@param[in] n slot index
+@param[in] page_no page number to set
+@param[in,out] mtr mini-transaction */
+inline void fseg_set_nth_frag_page_no(fseg_inode_t *inode, buf_block_t *iblock,
+ ulint n, ulint page_no, mtr_t *mtr)
{
- ut_ad(inode && mtr);
- ut_ad(n < FSEG_FRAG_ARR_N_SLOTS);
- ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_SX_FIX));
- ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
+ ut_ad(n < FSEG_FRAG_ARR_N_SLOTS);
+ ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_SX_FIX));
+ ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
- mlog_write_ulint(inode + FSEG_FRAG_ARR + n * FSEG_FRAG_SLOT_SIZE,
- page_no, MLOG_4BYTES, mtr);
+ mtr->write<4>(*iblock, inode + FSEG_FRAG_ARR + n * FSEG_FRAG_SLOT_SIZE,
+ page_no);
}
/**********************************************************************//**
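
fseg_set_nth_frag_page_no() above stores a 4-byte page number in slot n of the fragment array at FSEG_FRAG_ARR + n * FSEG_FRAG_SLOT_SIZE, and fseg_create() below initialises that array with 0xff bytes, relying on FIL_NULL == 0xffffffff to mark empty slots. The standalone sketch below is not InnoDB code; the slot count and helper names are assumptions of the sketch. It models such a slot array in a plain byte buffer: find the first empty slot, store a page number, read it back.

#include <cassert>
#include <cstdint>
#include <cstring>

static const uint32_t SLOT_EMPTY = 0xffffffff;  /* plays the role of FIL_NULL */
static const unsigned N_SLOTS    = 32;          /* slot count assumed by the sketch */
static const unsigned SLOT_SIZE  = 4;

static uint32_t get_slot(const unsigned char *arr, unsigned n)
{
  const unsigned char *p = arr + n * SLOT_SIZE;
  return uint32_t(p[0]) << 24 | uint32_t(p[1]) << 16 |
         uint32_t(p[2]) << 8 | uint32_t(p[3]);
}

static void set_slot(unsigned char *arr, unsigned n, uint32_t page_no)
{
  unsigned char *p = arr + n * SLOT_SIZE;
  p[0] = static_cast<unsigned char>(page_no >> 24);
  p[1] = static_cast<unsigned char>(page_no >> 16);
  p[2] = static_cast<unsigned char>(page_no >> 8);
  p[3] = static_cast<unsigned char>(page_no);
}

int main()
{
  unsigned char frag_arr[N_SLOTS * SLOT_SIZE];
  std::memset(frag_arr, 0xff, sizeof frag_arr);   /* every slot starts out empty */

  unsigned n = 0;                                 /* find the first empty slot */
  while (n < N_SLOTS && get_slot(frag_arr, n) != SLOT_EMPTY)
    n++;

  set_slot(frag_arr, n, 7);                       /* record page number 7 there */
  assert(n == 0 && get_slot(frag_arr, 0) == 7);
  return 0;
}
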
@@ -1762,11 +1760,9 @@ fseg_create(
no need to do the check for this individual
operation */
{
- fsp_header_t* space_header;
fseg_inode_t* inode;
ib_id_t seg_id;
buf_block_t* block = 0; /* remove warning */
- fseg_header_t* header = 0; /* remove warning */
ulint n_reserved;
DBUG_ENTER("fseg_create");
@@ -1782,9 +1778,6 @@ fseg_create(
block = buf_page_get(page_id_t(space->id, page),
space->zip_size(),
RW_SX_LATCH, mtr);
-
- header = byte_offset + buf_block_get_frame(block);
-
if (!space->full_crc32()) {
fil_block_check_type(*block, space->id == TRX_SYS_SPACE
&& page == TRX_SYS_PAGE_NO
@@ -1800,9 +1793,10 @@ fseg_create(
DBUG_RETURN(NULL);
}
- space_header = fsp_get_space_header(space, mtr);
+ buf_block_t* header = fsp_get_header(space, mtr);
+ buf_block_t* iblock;
- inode = fsp_alloc_seg_inode(space, space_header, mtr);
+ inode = fsp_alloc_seg_inode(space, header, &iblock, mtr);
if (inode == NULL) {
goto funct_exit;
@@ -1811,26 +1805,28 @@ fseg_create(
/* Read the next segment id from space header and increment the
value in space header */
- seg_id = mach_read_from_8(space_header + FSP_SEG_ID);
+ seg_id = mach_read_from_8(FSP_HEADER_OFFSET + FSP_SEG_ID
+ + header->frame);
- mlog_write_ull(space_header + FSP_SEG_ID, seg_id + 1, mtr);
- mlog_write_ull(inode + FSEG_ID, seg_id, mtr);
+ mtr->write<8>(*header, FSP_HEADER_OFFSET + FSP_SEG_ID + header->frame,
+ seg_id + 1);
+ mtr->write<8>(*iblock, inode + FSEG_ID, seg_id);
ut_ad(!mach_read_from_4(inode + FSEG_NOT_FULL_N_USED));
- flst_init(inode + FSEG_FREE, mtr);
- flst_init(inode + FSEG_NOT_FULL, mtr);
- flst_init(inode + FSEG_FULL, mtr);
+ flst_init(*iblock, inode + FSEG_FREE, mtr);
+ flst_init(*iblock, inode + FSEG_NOT_FULL, mtr);
+ flst_init(*iblock, inode + FSEG_FULL, mtr);
- mlog_write_ulint(inode + FSEG_MAGIC_N, FSEG_MAGIC_N_VALUE,
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*iblock, inode + FSEG_MAGIC_N, FSEG_MAGIC_N_VALUE);
compile_time_assert(FSEG_FRAG_SLOT_SIZE == 4);
compile_time_assert(FIL_NULL == 0xffffffff);
- mlog_memset(inode + FSEG_FRAG_ARR,
+ mlog_memset(iblock, uint16_t(inode - iblock->frame) + FSEG_FRAG_ARR,
FSEG_FRAG_SLOT_SIZE * FSEG_FRAG_ARR_N_SLOTS, 0xff, mtr);
if (page == 0) {
block = fseg_alloc_free_page_low(space,
- inode, 0, FSP_UP, RW_SX_LATCH,
+ inode, iblock, 0, FSP_UP,
+ RW_SX_LATCH,
mtr, mtr
#ifdef UNIV_DEBUG
, has_done_reservation
@@ -1842,25 +1838,24 @@ fseg_create(
ut_ad(!has_done_reservation || block != NULL);
if (block == NULL) {
- fsp_free_seg_inode(space, inode, mtr);
+ fsp_free_seg_inode(space, inode, iblock, mtr);
goto funct_exit;
}
ut_ad(rw_lock_get_sx_lock_count(&block->lock) == 1);
- header = byte_offset + buf_block_get_frame(block);
- mlog_write_ulint(buf_block_get_frame(block) + FIL_PAGE_TYPE,
- FIL_PAGE_TYPE_SYS, MLOG_2BYTES, mtr);
+ mtr->write<2>(*block, block->frame + FIL_PAGE_TYPE,
+ FIL_PAGE_TYPE_SYS);
}
- mlog_write_ulint(header + FSEG_HDR_OFFSET,
- page_offset(inode), MLOG_2BYTES, mtr);
+ mtr->write<2>(*block, byte_offset + FSEG_HDR_OFFSET
+ + block->frame, page_offset(inode));
- mlog_write_ulint(header + FSEG_HDR_PAGE_NO,
- page_get_page_no(page_align(inode)),
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*block, byte_offset + FSEG_HDR_PAGE_NO
+ + block->frame, page_get_page_no(page_align(inode)));
- mlog_write_ulint(header + FSEG_HDR_SPACE, space->id, MLOG_4BYTES, mtr);
+ mtr->write<4,mtr_t::OPT>(*block, byte_offset + FSEG_HDR_SPACE
+ + block->frame, space->id);
funct_exit:
if (!has_done_reservation) {
@@ -1930,18 +1925,19 @@ fseg_n_reserved_pages(
This happens if the segment is big enough to allow extents in the free list,
the free list is empty, and the extents can be allocated consecutively from
the hint onward.
-@param[in] inode segment inode
-@param[in] space tablespace
-@param[in] hint hint which extent would be good as the first
-extent
-@param[in,out] mtr mini-transaction */
+@param[in,out] inode segment inode
+@param[in,out] iblock segment inode page
+@param[in] space tablespace
+@param[in] hint hint which extent would be good as the first extent
+@param[in,out] mtr mini-transaction */
static
void
fseg_fill_free_list(
- fseg_inode_t* inode,
- fil_space_t* space,
- ulint hint,
- mtr_t* mtr)
+ fseg_inode_t* inode,
+ buf_block_t* iblock,
+ fil_space_t* space,
+ ulint hint,
+ mtr_t* mtr)
{
xdes_t* descr;
ulint i;
@@ -1969,7 +1965,8 @@ fseg_fill_free_list(
}
for (i = 0; i < FSEG_FREE_LIST_MAX_LEN; i++) {
- descr = xdes_get_descriptor(space, hint, mtr);
+ buf_block_t* xdes;
+ descr = xdes_get_descriptor(space, hint, &xdes, mtr);
if ((descr == NULL)
|| (XDES_FREE != xdes_get_state(descr, mtr))) {
@@ -1979,16 +1976,19 @@ fseg_fill_free_list(
return;
}
- descr = fsp_alloc_free_extent(space, hint, mtr);
+ descr = fsp_alloc_free_extent(space, hint, &xdes, mtr);
- xdes_set_state(descr, XDES_FSEG, mtr);
+ xdes_set_state(*xdes, descr, XDES_FSEG, mtr);
seg_id = mach_read_from_8(inode + FSEG_ID);
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
- mlog_write_ull(descr + XDES_ID, seg_id, mtr);
+ mtr->write<8>(*xdes, descr + XDES_ID, seg_id);
- flst_add_last(inode + FSEG_FREE, descr + XDES_FLST_NODE, mtr);
+ flst_add_last(iblock, FSEG_FREE
+ + uint16_t(inode - iblock->frame),
+ xdes, XDES_FLST_NODE
+ + uint16_t(descr - xdes->frame), mtr);
hint += FSP_EXTENT_SIZE;
}
}
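
fseg_fill_free_list() above walks forward from the hint, takes up to FSEG_FREE_LIST_MAX_LEN free extents, marks each one XDES_FSEG, stamps the segment id into XDES_ID and appends the descriptor to the inode's FSEG_FREE list. The standalone sketch below is not InnoDB code; the extent struct, the vectors standing in for the on-disk lists, and the cap of 4 are assumptions of the sketch. It captures only the transfer of ownership from the space free list to the segment.

#include <cassert>
#include <cstdint>
#include <vector>

enum xdes_state { XDES_FREE, XDES_FREE_FRAG, XDES_FULL_FRAG, XDES_FSEG };

struct extent
{
  xdes_state state;
  uint64_t   seg_id;
  extent() : state(XDES_FREE), seg_id(0) {}
};

static const unsigned FREE_LIST_MAX_LEN = 4;   /* cap assumed by the sketch */

/* Move up to FREE_LIST_MAX_LEN free extents into the segment's own free list,
   tagging each one with the owning segment id. */
static void fill_free_list(std::vector<extent> &space_free,
                           std::vector<extent> &seg_free, uint64_t seg_id)
{
  for (unsigned i = 0; i < FREE_LIST_MAX_LEN && !space_free.empty(); i++)
  {
    extent e = space_free.back();
    space_free.pop_back();
    e.state  = XDES_FSEG;          /* the extent now belongs to the segment */
    e.seg_id = seg_id;
    seg_free.push_back(e);
  }
}

int main()
{
  std::vector<extent> space_free(10), seg_free;
  fill_free_list(space_free, seg_free, 42);
  assert(seg_free.size() == 4 && space_free.size() == 6);
  assert(seg_free.front().state == XDES_FSEG && seg_free.front().seg_id == 42);
  return 0;
}
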
@@ -1997,7 +1997,9 @@ fseg_fill_free_list(
the segment, then tries to allocate from the space free list.
NOTE that the extent returned still resides in the segment free list, it is
not yet taken off it!
-@param[in] inode segment inode
+@param[in,out] inode segment inode
+@param[in,out] iblock segment inode page
+@param[out] xdes extent descriptor page
@param[in,out] space tablespace
@param[in,out] mtr mini-transaction
@retval NULL if no page could be allocated
@@ -2008,6 +2010,8 @@ static
xdes_t*
fseg_alloc_free_extent(
fseg_inode_t* inode,
+ buf_block_t* iblock,
+ buf_block_t** xdes,
fil_space_t* space,
mtr_t* mtr)
{
@@ -2024,10 +2028,10 @@ fseg_alloc_free_extent(
first = flst_get_first(inode + FSEG_FREE);
- descr = xdes_lst_get_descriptor(space, first, mtr);
+ descr = xdes_lst_get_descriptor(space, first, xdes, mtr);
} else {
/* Segment free list was empty, allocate from space */
- descr = fsp_alloc_free_extent(space, 0, mtr);
+ descr = fsp_alloc_free_extent(space, 0, xdes, mtr);
if (descr == NULL) {
@@ -2036,12 +2040,15 @@ fseg_alloc_free_extent(
seg_id = mach_read_from_8(inode + FSEG_ID);
- xdes_set_state(descr, XDES_FSEG, mtr);
- mlog_write_ull(descr + XDES_ID, seg_id, mtr);
- flst_add_last(inode + FSEG_FREE, descr + XDES_FLST_NODE, mtr);
+ xdes_set_state(**xdes, descr, XDES_FSEG, mtr);
+ mtr->write<8,mtr_t::OPT>(**xdes, descr + XDES_ID, seg_id);
+ flst_add_last(iblock, FSEG_FREE
+ + uint16_t(inode - iblock->frame),
+ *xdes, XDES_FLST_NODE
+ + uint16_t(descr - (*xdes)->frame), mtr);
/* Try to fill the segment free list */
- fseg_fill_free_list(inode, space,
+ fseg_fill_free_list(inode, iblock, space,
xdes_get_offset(descr) + FSP_EXTENT_SIZE,
mtr);
}
@@ -2054,6 +2061,7 @@ This function implements the intelligent allocation strategy which tries to
minimize file space fragmentation.
@param[in,out] space tablespace
@param[in,out] seg_inode segment inode
+@param[in,out] iblock segment inode page
@param[in] hint hint of which page would be desirable
@param[in] direction if the new page is needed because of
an index page split, and records are inserted there in order, into which
@@ -2074,6 +2082,7 @@ buf_block_t*
fseg_alloc_free_page_low(
fil_space_t* space,
fseg_inode_t* seg_inode,
+ buf_block_t* iblock,
ulint hint,
byte direction,
rw_lock_type_t rw_latch,
@@ -2084,7 +2093,6 @@ fseg_alloc_free_page_low(
#endif /* UNIV_DEBUG */
)
{
- fsp_header_t* space_header;
ib_id_t seg_id;
ulint used;
ulint reserved;
@@ -2092,6 +2100,7 @@ fseg_alloc_free_page_low(
ulint ret_page; /*!< the allocated page offset, FIL_NULL
if could not be allocated */
xdes_t* ret_descr; /*!< the extent of the allocated page */
+ buf_block_t* xdes;
ulint n;
const ulint space_id = space->id;
@@ -2107,16 +2116,16 @@ fseg_alloc_free_page_low(
reserved = fseg_n_reserved_pages_low(seg_inode, &used, mtr);
- space_header = fsp_get_space_header(space, mtr);
+ buf_block_t* header = fsp_get_header(space, mtr);
- descr = xdes_get_descriptor_with_space_hdr(space_header, space,
- hint, mtr);
+ descr = xdes_get_descriptor_with_space_hdr(header, space, hint,
+ &xdes, mtr);
if (descr == NULL) {
/* Hint outside space or too high above free limit: reset
hint */
/* The file space header page is always allocated. */
hint = 0;
- descr = xdes_get_descriptor(space, hint, mtr);
+ descr = xdes_get_descriptor(space, hint, &xdes, mtr);
}
/* In the big if-else below we look for ret_page and ret_descr */
@@ -2142,26 +2151,28 @@ take_hinted_page:
=========================================================
the hinted page
===============*/
- ret_descr = fsp_alloc_free_extent(space, hint, mtr);
+ ret_descr = fsp_alloc_free_extent(space, hint, &xdes, mtr);
ut_a(ret_descr == descr);
- xdes_set_state(ret_descr, XDES_FSEG, mtr);
- mlog_write_ull(ret_descr + XDES_ID, seg_id, mtr);
- flst_add_last(seg_inode + FSEG_FREE,
- ret_descr + XDES_FLST_NODE, mtr);
+ xdes_set_state(*xdes, ret_descr, XDES_FSEG, mtr);
+ mtr->write<8,mtr_t::OPT>(*xdes, ret_descr + XDES_ID, seg_id);
+ flst_add_last(iblock, FSEG_FREE
+ + uint16_t(seg_inode - iblock->frame),
+ xdes, XDES_FLST_NODE
+ + uint16_t(ret_descr - xdes->frame), mtr);
/* Try to fill the segment free list */
- fseg_fill_free_list(seg_inode, space,
+ fseg_fill_free_list(seg_inode, iblock, space,
hint + FSP_EXTENT_SIZE, mtr);
goto take_hinted_page;
/*-----------------------------------------------------------*/
} else if ((direction != FSP_NO_DIR)
&& ((reserved - used) < reserved / FSEG_FILLFACTOR)
&& (used >= FSEG_FRAG_LIMIT)
- && (!!(ret_descr
- = fseg_alloc_free_extent(seg_inode, space, mtr)))) {
-
+ && !!(ret_descr = fseg_alloc_free_extent(seg_inode, iblock,
+ &xdes, space,
+ mtr))) {
/* 3. We take any free extent (which was already assigned above
===============================================================
in the if-condition to ret_descr) and take the lowest or
@@ -2204,7 +2215,7 @@ take_hinted_page:
return(NULL);
}
- ret_descr = xdes_lst_get_descriptor(space, first, mtr);
+ ret_descr = xdes_lst_get_descriptor(space, first, &xdes, mtr);
ret_page = xdes_get_offset(ret_descr)
+ xdes_find_free(ret_descr);
ut_ad(!has_done_reservation || ret_page != FIL_NULL);
@@ -2224,7 +2235,7 @@ take_hinted_page:
ut_a(n != ULINT_UNDEFINED);
fseg_set_nth_frag_page_no(
- seg_inode, n, block->page.id.page_no(),
+ seg_inode, iblock, n, block->page.id.page_no(),
mtr);
}
@@ -2235,7 +2246,8 @@ take_hinted_page:
} else {
/* 7. We allocate a new extent and take its first page
======================================================*/
- ret_descr = fseg_alloc_free_extent(seg_inode, space, mtr);
+ ret_descr = fseg_alloc_free_extent(seg_inode, iblock, &xdes,
+ space, mtr);
if (ret_descr == NULL) {
ret_page = FIL_NULL;
@@ -2268,7 +2280,7 @@ take_hinted_page:
}
if (!fsp_try_extend_data_file_with_pages(
- space, ret_page, space_header, mtr)) {
+ space, ret_page, header, mtr)) {
/* No disk space left */
ut_ad(!has_done_reservation);
return(NULL);
@@ -2283,10 +2295,14 @@ got_hinted_page:
The extent is still in the appropriate list (FSEG_NOT_FULL
or FSEG_FREE), and the page is not yet marked as used. */
- ut_ad(xdes_get_descriptor(space, ret_page, mtr) == ret_descr);
+ ut_d(buf_block_t* xxdes);
+ ut_ad(xdes_get_descriptor(space, ret_page, &xxdes, mtr)
+ == ret_descr);
+ ut_ad(xdes == xxdes);
ut_ad(xdes_is_free(ret_descr, ret_page % FSP_EXTENT_SIZE));
- fseg_mark_page_used(seg_inode, ret_page, ret_descr, mtr);
+ fseg_mark_page_used(seg_inode, iblock, ret_page, ret_descr,
+ xdes, mtr);
}
return fsp_page_create(space, ret_page, rw_latch, mtr, init_mtr);
@@ -2344,7 +2360,7 @@ fseg_alloc_free_page_general(
}
block = fseg_alloc_free_page_low(space,
- inode, hint, direction,
+ inode, iblock, hint, direction,
RW_X_LATCH, mtr, init_mtr
#ifdef UNIV_DEBUG
, has_done_reservation
@@ -2367,19 +2383,18 @@ of a single-table tablespace, and they are also physically initialized to
the data file. That is we have already extended the data file so that those
pages are inside the data file. If not, this function extends the tablespace
with pages.
-@param[in,out] space tablespace
-@param[in,out] space_header tablespace header, x-latched
-@param[in] size size of the tablespace in pages,
-must be less than FSP_EXTENT_SIZE
-@param[in,out] mtr mini-transaction
-@param[in] n_pages number of pages to reserve
+@param[in,out] space tablespace
+@param[in,out] header tablespace header, x-latched
+@param[in] size tablespace size in pages, less than FSP_EXTENT_SIZE
+@param[in,out] mtr mini-transaction
+@param[in] n_pages number of pages to reserve
@return true if there were at least n_pages free pages, or we were able
to extend */
static
bool
fsp_reserve_free_pages(
fil_space_t* space,
- fsp_header_t* space_header,
+ buf_block_t* header,
ulint size,
mtr_t* mtr,
ulint n_pages)
@@ -2390,15 +2405,16 @@ fsp_reserve_free_pages(
ut_a(!is_system_tablespace(space->id));
ut_a(size < FSP_EXTENT_SIZE);
- descr = xdes_get_descriptor_with_space_hdr(
- space_header, space, 0, mtr);
+ buf_block_t* xdes;
+ descr = xdes_get_descriptor_with_space_hdr(header, space, 0, &xdes,
+ mtr);
n_used = xdes_get_n_used(descr);
ut_a(n_used <= size);
return(size >= n_used + n_pages
|| fsp_try_extend_data_file_with_pages(
- space, n_used + n_pages - 1, space_header, mtr));
+ space, n_used + n_pages - 1, header, mtr));
}
/** Reserves free pages from a tablespace. All mini-transactions which may
@@ -2448,7 +2464,6 @@ fsp_reserve_free_extents(
mtr_t* mtr,
ulint n_pages)
{
- fsp_header_t* space_header;
ulint n_free_list_ext;
ulint free_limit;
ulint size;
@@ -2463,22 +2478,24 @@ fsp_reserve_free_extents(
mtr_x_lock_space(space, mtr);
const ulint physical_size = space->physical_size();
- space_header = fsp_get_space_header(space, mtr);
+ buf_block_t* header = fsp_get_header(space, mtr);
try_again:
- size = mach_read_from_4(space_header + FSP_SIZE);
+ size = mach_read_from_4(FSP_HEADER_OFFSET + FSP_SIZE + header->frame);
ut_ad(size == space->size_in_header);
if (size < FSP_EXTENT_SIZE && n_pages < FSP_EXTENT_SIZE / 2) {
/* Use different rules for small single-table tablespaces */
*n_reserved = 0;
- return(fsp_reserve_free_pages(space, space_header, size,
+ return(fsp_reserve_free_pages(space, header, size,
mtr, n_pages));
}
- n_free_list_ext = flst_get_len(space_header + FSP_FREE);
+ n_free_list_ext = flst_get_len(FSP_HEADER_OFFSET + FSP_FREE
+ + header->frame);
ut_ad(space->free_len == n_free_list_ext);
- free_limit = mach_read_from_4(space_header + FSP_FREE_LIMIT);
+ free_limit = mach_read_from_4(FSP_HEADER_OFFSET + FSP_FREE_LIMIT
+ + header->frame);
ut_ad(space->free_limit == free_limit);
/* Below we play safe when counting free extents above the free limit:
@@ -2534,7 +2551,7 @@ try_again:
return(true);
}
try_to_extend:
- if (ulint n = fsp_try_extend_data_file(space, space_header, mtr)) {
+ if (ulint n = fsp_try_extend_data_file(space, header, mtr)) {
total_reserved += n;
goto try_again;
}
@@ -2542,58 +2559,6 @@ try_to_extend:
return(false);
}
-/********************************************************************//**
-Marks a page used. The page must reside within the extents of the given
-segment. */
-static MY_ATTRIBUTE((nonnull))
-void
-fseg_mark_page_used(
-/*================*/
- fseg_inode_t* seg_inode,/*!< in: segment inode */
- ulint page, /*!< in: page offset */
- xdes_t* descr, /*!< in: extent descriptor */
- mtr_t* mtr) /*!< in/out: mini-transaction */
-{
- ulint not_full_n_used;
-
- ut_ad(fil_page_get_type(page_align(seg_inode)) == FIL_PAGE_INODE);
- ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
- ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
- == FSEG_MAGIC_N_VALUE);
- ut_ad(!memcmp(seg_inode + FSEG_ID, descr + XDES_ID, 4));
-
- if (!xdes_get_n_used(descr)) {
- /* We move the extent from the free list to the
- NOT_FULL list */
- flst_remove(seg_inode + FSEG_FREE, descr + XDES_FLST_NODE,
- mtr);
- flst_add_last(seg_inode + FSEG_NOT_FULL,
- descr + XDES_FLST_NODE, mtr);
- }
-
- ut_ad(xdes_is_free(descr, page % FSP_EXTENT_SIZE));
-
- /* We mark the page as used */
- xdes_set_free<false>(descr, page % FSP_EXTENT_SIZE, mtr);
-
- not_full_n_used = mach_read_from_4(seg_inode + FSEG_NOT_FULL_N_USED);
- not_full_n_used++;
- mlog_write_ulint(seg_inode + FSEG_NOT_FULL_N_USED, not_full_n_used,
- MLOG_4BYTES, mtr);
- if (xdes_is_full(descr)) {
- /* We move the extent from the NOT_FULL list to the
- FULL list */
- flst_remove(seg_inode + FSEG_NOT_FULL,
- descr + XDES_FLST_NODE, mtr);
- flst_add_last(seg_inode + FSEG_FULL,
- descr + XDES_FLST_NODE, mtr);
-
- mlog_write_ulint(seg_inode + FSEG_NOT_FULL_N_USED,
- not_full_n_used - FSP_EXTENT_SIZE,
- MLOG_4BYTES, mtr);
- }
-}
-
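
The removed fseg_mark_page_used() maintained FSEG_NOT_FULL_N_USED: the extent moved from the FREE to the NOT_FULL list on its first allocation, the counter grew by one for each page marked used, and a whole extent's worth was subtracted once the extent filled up and moved to the FULL list. The standalone sketch below is not InnoDB code; the 64-page extent size and the struct are assumptions of the sketch. It reproduces that bookkeeping so the invariant (the counter covers only extents still on the NOT_FULL list) is easy to check.

#include <cassert>
#include <cstdint>

static const uint32_t PAGES_PER_EXTENT = 64;   /* extent size assumed by the sketch */

/* Pages-in-use counter restricted to extents on the NOT_FULL list. */
struct segment_counters
{
  uint32_t not_full_n_used;
  segment_counters() : not_full_n_used(0) {}
};

static void mark_page_used(segment_counters &seg, uint32_t &used_in_extent)
{
  used_in_extent++;
  seg.not_full_n_used++;
  if (used_in_extent == PAGES_PER_EXTENT)
    /* the extent is full and leaves the NOT_FULL list, so stop counting it */
    seg.not_full_n_used -= PAGES_PER_EXTENT;
}

int main()
{
  segment_counters seg;
  uint32_t used = 0;
  for (uint32_t i = 0; i < PAGES_PER_EXTENT; i++)
    mark_page_used(seg, used);
  assert(used == PAGES_PER_EXTENT);
  assert(seg.not_full_n_used == 0);   /* a full extent is no longer counted */
  return 0;
}
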
/** Frees a single page of a segment.
@param[in] seg_inode segment inode
@param[in,out] space tablespace
@@ -2606,6 +2571,7 @@ static
void
fseg_free_page_low(
fseg_inode_t* seg_inode,
+ buf_block_t* iblock,
fil_space_t* space,
page_no_t offset,
#ifdef BTR_CUR_HASH_ADAPT
@@ -2614,9 +2580,6 @@ fseg_free_page_low(
bool log,
mtr_t* mtr)
{
- xdes_t* descr;
- ulint not_full_n_used;
- ulint state;
ib_id_t descr_id;
ib_id_t seg_id;
@@ -2636,7 +2599,8 @@ fseg_free_page_low(
}
#endif /* BTR_CUR_HASH_ADAPT */
- descr = xdes_get_descriptor(space, offset, mtr);
+ buf_block_t* xdes;
+ xdes_t* descr = xdes_get_descriptor(space, offset, &xdes, mtr);
if (xdes_is_free(descr, offset % FSP_EXTENT_SIZE)) {
ib::fatal() << "InnoDB is trying to free page "
@@ -2648,9 +2612,7 @@ fseg_free_page_low(
<< FORCE_RECOVERY_MSG;
}
- state = xdes_get_state(descr, mtr);
-
- if (state != XDES_FSEG) {
+ if (xdes_get_state(descr, mtr) != XDES_FSEG) {
/* The page is in the fragment pages of the segment */
for (ulint i = 0;; i++) {
if (fseg_get_nth_frag_page_no(seg_inode, i, mtr)
@@ -2688,30 +2650,32 @@ fseg_free_page_low(
<< FORCE_RECOVERY_MSG;
}
- not_full_n_used = mach_read_from_4(seg_inode + FSEG_NOT_FULL_N_USED);
+ byte* p_not_full = seg_inode + FSEG_NOT_FULL_N_USED;
+ uint32_t not_full_n_used = mach_read_from_4(p_not_full);
+ const uint16_t xoffset= XDES_FLST_NODE + uint16_t(descr - xdes->frame);
+ const uint16_t ioffset= uint16_t(seg_inode - iblock->frame);
+
if (xdes_is_full(descr)) {
/* The fragment is full: move it to another list */
- flst_remove(seg_inode + FSEG_FULL,
- descr + XDES_FLST_NODE, mtr);
- flst_add_last(seg_inode + FSEG_NOT_FULL,
- descr + XDES_FLST_NODE, mtr);
- mlog_write_ulint(seg_inode + FSEG_NOT_FULL_N_USED,
- not_full_n_used + FSP_EXTENT_SIZE - 1,
- MLOG_4BYTES, mtr);
+ flst_remove(iblock, FSEG_FULL + ioffset, xdes, xoffset, mtr);
+ flst_add_last(iblock, FSEG_NOT_FULL + ioffset, xdes, xoffset,
+ mtr);
+ not_full_n_used += FSP_EXTENT_SIZE - 1;
} else {
ut_a(not_full_n_used > 0);
- mlog_write_ulint(seg_inode + FSEG_NOT_FULL_N_USED,
- not_full_n_used - 1, MLOG_4BYTES, mtr);
+ not_full_n_used--;
}
+ mtr->write<4>(*iblock, p_not_full, not_full_n_used);
+
const ulint bit = offset % FSP_EXTENT_SIZE;
- xdes_set_free<true>(descr, bit, mtr);
+ xdes_set_free<true>(*xdes, descr, bit, mtr);
if (!xdes_get_n_used(descr)) {
/* The extent has become free: free it to space */
- flst_remove(seg_inode + FSEG_NOT_FULL,
- descr + XDES_FLST_NODE, mtr);
+ flst_remove(iblock, FSEG_NOT_FULL + ioffset, xdes, xoffset,
+ mtr);
fsp_free_extent(space, offset, mtr);
}
}
@@ -2755,7 +2719,7 @@ fseg_free_page_func(
fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr);
}
- fseg_free_page_low(seg_inode, space, offset, ahi, log, mtr);
+ fseg_free_page_low(seg_inode, iblock, space, offset, ahi, log, mtr);
ut_d(buf_page_set_file_page_was_freed(page_id_t(space->id, offset)));
@@ -2802,6 +2766,7 @@ static
void
fseg_free_extent(
fseg_inode_t* seg_inode,
+ buf_block_t* iblock,
fil_space_t* space,
ulint page,
#ifdef BTR_CUR_HASH_ADAPT
@@ -2810,13 +2775,11 @@ fseg_free_extent(
mtr_t* mtr)
{
ulint first_page_in_extent;
- xdes_t* descr;
- ulint not_full_n_used;
- ulint descr_n_used;
ut_ad(mtr != NULL);
- descr = xdes_get_descriptor(space, page, mtr);
+ buf_block_t* xdes;
+ xdes_t* descr = xdes_get_descriptor(space, page, &xdes, mtr);
ut_a(xdes_get_state(descr, mtr) == XDES_FSEG);
ut_a(!memcmp(descr + XDES_ID, seg_inode + FSEG_ID, 8));
@@ -2842,23 +2805,23 @@ fseg_free_extent(
}
#endif /* BTR_CUR_HASH_ADAPT */
+ const uint16_t xoffset= XDES_FLST_NODE + uint16_t(descr - xdes->frame);
+ const uint16_t ioffset= uint16_t(seg_inode - iblock->frame);
+
if (xdes_is_full(descr)) {
- flst_remove(seg_inode + FSEG_FULL,
- descr + XDES_FLST_NODE, mtr);
+ flst_remove(iblock, FSEG_FULL + ioffset, xdes, xoffset, mtr);
} else if (!xdes_get_n_used(descr)) {
- flst_remove(seg_inode + FSEG_FREE,
- descr + XDES_FLST_NODE, mtr);
+ flst_remove(iblock, FSEG_FREE + ioffset, xdes, xoffset, mtr);
} else {
- flst_remove(seg_inode + FSEG_NOT_FULL,
- descr + XDES_FLST_NODE, mtr);
+ flst_remove(iblock, FSEG_NOT_FULL + ioffset, xdes, xoffset,
+ mtr);
- not_full_n_used = mach_read_from_4(FSEG_NOT_FULL_N_USED
- + seg_inode);
- descr_n_used = xdes_get_n_used(descr);
+ ulint not_full_n_used = mach_read_from_4(
+ FSEG_NOT_FULL_N_USED + seg_inode);
+ ulint descr_n_used = xdes_get_n_used(descr);
ut_a(not_full_n_used >= descr_n_used);
- mlog_write_ulint(seg_inode + FSEG_NOT_FULL_N_USED,
- not_full_n_used - descr_n_used,
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*iblock, seg_inode + FSEG_NOT_FULL_N_USED,
+ not_full_n_used - descr_n_used);
}
fsp_free_extent(space, page, mtr);
@@ -2873,8 +2836,8 @@ fseg_free_extent(
}
#ifndef BTR_CUR_HASH_ADAPT
-# define fseg_free_extent(inode, space, page, ahi, mtr) \
- fseg_free_extent(inode, space, page, mtr)
+# define fseg_free_extent(inode, iblock, space, page, ahi, mtr) \
+ fseg_free_extent(inode, iblock, space, page, mtr)
#endif /* !BTR_CUR_HASH_ADAPT */
/**********************************************************************//**
@@ -2882,8 +2845,8 @@ Frees part of a segment. This function can be used to free a segment by
repeatedly calling this function in different mini-transactions. Doing
the freeing in a single mini-transaction might result in too big a
mini-transaction.
-@return TRUE if freeing completed */
-ibool
+@return true if freeing completed */
+bool
fseg_free_step_func(
fseg_header_t* header, /*!< in, own: segment header; NOTE: if the header
resides on the first page of the frag list
@@ -2897,7 +2860,6 @@ fseg_free_step_func(
{
ulint n;
ulint page;
- xdes_t* descr;
fseg_inode_t* inode;
ulint space_id;
ulint header_page;
@@ -2907,22 +2869,22 @@ fseg_free_step_func(
space_id = page_get_space_id(page_align(header));
header_page = page_get_page_no(page_align(header));
- fil_space_t* space = mtr_x_lock_space(space_id, mtr);
-
- descr = xdes_get_descriptor(space, header_page, mtr);
+ fil_space_t* space = mtr_x_lock_space(space_id, mtr);
+ buf_block_t* xdes;
+ xdes_t* descr = xdes_get_descriptor(space, header_page, &xdes, mtr);
/* Check that the header resides on a page which has not been
freed yet */
ut_a(!xdes_is_free(descr, header_page % FSP_EXTENT_SIZE));
- buf_block_t* iblock;
+ buf_block_t* iblock;
const ulint zip_size = space->zip_size();
inode = fseg_inode_try_get(header, space_id, zip_size, mtr, &iblock);
if (inode == NULL) {
ib::info() << "Double free of inode from "
<< page_id_t(space_id, header_page);
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(true);
}
if (!space->full_crc32()) {
@@ -2933,8 +2895,8 @@ fseg_free_step_func(
if (descr != NULL) {
/* Free the extent held by the segment */
page = xdes_get_offset(descr);
- fseg_free_extent(inode, space, page, ahi, mtr);
- DBUG_RETURN(FALSE);
+ fseg_free_extent(inode, iblock, space, page, ahi, mtr);
+ DBUG_RETURN(false);
}
/* Free a frag page */
@@ -2942,13 +2904,13 @@ fseg_free_step_func(
if (n == ULINT_UNDEFINED) {
/* Freeing completed: free the segment inode */
- fsp_free_seg_inode(space, inode, mtr);
+ fsp_free_seg_inode(space, inode, iblock, mtr);
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(true);
}
fseg_free_page_low(
- inode, space,
+ inode, iblock, space,
fseg_get_nth_frag_page_no(inode, n, mtr),
ahi, true, mtr);
@@ -2956,19 +2918,19 @@ fseg_free_step_func(
if (n == ULINT_UNDEFINED) {
/* Freeing completed: free the segment inode */
- fsp_free_seg_inode(space, inode, mtr);
+ fsp_free_seg_inode(space, inode, iblock, mtr);
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(true);
}
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(false);
}
/**********************************************************************//**
Frees part of a segment. Differs from fseg_free_step because this function
leaves the header page unfreed.
-@return TRUE if freeing completed, except the header page */
-ibool
+@return true if freeing completed, except the header page */
+bool
fseg_free_step_not_header_func(
fseg_header_t* header, /*!< in: segment header which must reside on
the first fragment page of the segment */
@@ -3003,9 +2965,9 @@ fseg_free_step_not_header_func(
/* Free the extent held by the segment */
page = xdes_get_offset(descr);
- fseg_free_extent(inode, space, page, ahi, mtr);
+ fseg_free_extent(inode, iblock, space, page, ahi, mtr);
- return(FALSE);
+ return(false);
}
/* Free a frag page */
@@ -3020,12 +2982,12 @@ fseg_free_step_not_header_func(
if (page_no == page_get_page_no(page_align(header))) {
- return(TRUE);
+ return(true);
}
- fseg_free_page_low(inode, space, page_no, ahi, true, mtr);
+ fseg_free_page_low(inode, iblock, space, page_no, ahi, true, mtr);
- return(FALSE);
+ return(false);
}
/** Returns the first extent descriptor for a segment.
@@ -3058,10 +3020,12 @@ fseg_get_first_extent(
return(NULL);
}
- ut_ad(first.page != FIL_NULL);
+ DBUG_ASSERT(first.page != FIL_NULL);
+
+ buf_block_t *xdes;
return(first.page == FIL_NULL ? NULL
- : xdes_lst_get_descriptor(space, first, mtr));
+ : xdes_lst_get_descriptor(space, first, &xdes, mtr));
}
#ifdef UNIV_BTR_PRINT
diff --git a/storage/innobase/fut/fut0lst.cc b/storage/innobase/fut/fut0lst.cc
index f6e83f61cbc..93249aeab54 100644
--- a/storage/innobase/fut/fut0lst.cc
+++ b/storage/innobase/fut/fut0lst.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -27,434 +28,300 @@ Created 11/28/1995 Heikki Tuuri
#include "buf0buf.h"
#include "page0page.h"
-/********************************************************************//**
-Adds a node to an empty list. */
-static
-void
-flst_add_to_empty(
-/*==============*/
- flst_base_node_t* base, /*!< in: pointer to base node of
- empty list */
- flst_node_t* node, /*!< in: node to add */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+/** Add a node to an empty list. */
+static void flst_add_to_empty(buf_block_t *base, uint16_t boffset,
+ buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
- ulint space;
- fil_addr_t node_addr;
-
- ut_ad(mtr && base && node);
- ut_ad(base != node);
- ut_ad(mtr_memo_contains_page_flagged(mtr, base,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- ut_ad(mtr_memo_contains_page_flagged(mtr, node,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- ut_a(!flst_get_len(base));
-
- buf_ptr_get_fsp_addr(node, &space, &node_addr);
-
- /* Update first and last fields of base node */
- flst_write_addr(base + FLST_FIRST, node_addr, mtr);
- flst_write_addr(base + FLST_LAST, node_addr, mtr);
-
- /* Set prev and next fields of node to add */
- flst_zero_addr(node + FLST_PREV, mtr);
- flst_zero_addr(node + FLST_NEXT, mtr);
-
- /* Update len of base node */
- mlog_write_ulint(base + FLST_LEN, 1, MLOG_4BYTES, mtr);
+ ut_ad(base != add || boffset != aoffset);
+ ut_ad(boffset < base->physical_size());
+ ut_ad(aoffset < add->physical_size());
+ ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+ ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+ fil_addr_t addr= { add->page.id.page_no(), aoffset };
+
+ /* Update first and last fields of base node */
+ flst_write_addr(*base, base->frame + boffset + FLST_FIRST, addr, mtr);
+ /* MDEV-12353 TODO: use MEMMOVE record */
+ flst_write_addr(*base, base->frame + boffset + FLST_LAST, addr, mtr);
+
+ /* Set prev and next fields of node to add */
+ flst_zero_addr(*add, add->frame + aoffset + FLST_PREV, mtr);
+ flst_zero_addr(*add, add->frame + aoffset + FLST_NEXT, mtr);
+
+ /* Update len of base node */
+ ut_ad(!mach_read_from_4(base->frame + boffset + FLST_LEN));
+ mtr->write<1>(*base, base->frame + boffset + (FLST_LEN + 3), 1U);
}
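
flst_add_to_empty() above points both FLST_FIRST and FLST_LAST of the base node at the added node, clears the node's own FLST_PREV and FLST_NEXT, and writes only the least significant byte of FLST_LEN, which is safe because the preceding assertion guarantees the length was zero. The standalone sketch below is not InnoDB code; fil_addr, flst_base and flst_node here are simplified in-memory stand-ins for the on-page structures. It models the same empty-to-one transition.

#include <cassert>
#include <cstdint>

struct fil_addr { uint32_t page; uint16_t boffset; };

static const fil_addr ADDR_NULL = { 0xffffffff, 0 };
static bool addr_is_null(const fil_addr &a) { return a.page == 0xffffffff; }

struct flst_base { fil_addr first; fil_addr last; uint32_t len;
                   flst_base() : first(ADDR_NULL), last(ADDR_NULL), len(0) {} };
struct flst_node { fil_addr prev; fil_addr next;
                   flst_node() : prev(ADDR_NULL), next(ADDR_NULL) {} };

/* Add a node to an empty list: FIRST and LAST both point at it, its own
   links are null, and the length goes from 0 to 1. */
static void add_to_empty(flst_base &base, flst_node &node, fil_addr node_addr)
{
  assert(base.len == 0);
  base.first = node_addr;
  base.last  = node_addr;
  node.prev  = ADDR_NULL;
  node.next  = ADDR_NULL;
  base.len   = 1;
}

int main()
{
  flst_base base;
  flst_node node;
  const fil_addr at = { 5, 158 };     /* node lives at page 5, byte offset 158 */
  add_to_empty(base, node, at);
  assert(base.len == 1 && base.first.page == 5 && base.last.boffset == 158);
  assert(addr_is_null(node.prev) && addr_is_null(node.next));
  return 0;
}
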
-/********************************************************************//**
-Inserts a node after another in a list. */
-static
-void
-flst_insert_after(
-/*==============*/
- flst_base_node_t* base, /*!< in: pointer to base node of list */
- flst_node_t* node1, /*!< in: node to insert after */
- flst_node_t* node2, /*!< in: node to add */
- mtr_t* mtr); /*!< in: mini-transaction handle */
-/********************************************************************//**
-Inserts a node before another in a list. */
-static
-void
-flst_insert_before(
-/*===============*/
- flst_base_node_t* base, /*!< in: pointer to base node of list */
- flst_node_t* node2, /*!< in: node to insert */
- flst_node_t* node3, /*!< in: node to insert before */
- mtr_t* mtr); /*!< in: mini-transaction handle */
-
-/********************************************************************//**
-Adds a node as the last node in a list. */
-void
-flst_add_last(
-/*==========*/
- flst_base_node_t* base, /*!< in: pointer to base node of list */
- flst_node_t* node, /*!< in: node to add */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+/** Insert a node after another one.
+@param[in,out] base base node block
+@param[in] boffset byte offset of the base node
+@param[in,out] cur insert position block
+@param[in] coffset byte offset of the insert position
+@param[in,out] add block to be added
+@param[in] aoffset byte offset of the block to be added
+@param[in,out] mtr mini-transaction */
+static void flst_insert_after(buf_block_t *base, uint16_t boffset,
+ buf_block_t *cur, uint16_t coffset,
+ buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
- ulint space;
- fil_addr_t node_addr;
- ulint len;
- fil_addr_t last_addr;
-
- ut_ad(mtr && base && node);
- ut_ad(base != node);
- ut_ad(mtr_memo_contains_page_flagged(mtr, base,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- ut_ad(mtr_memo_contains_page_flagged(mtr, node,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- len = flst_get_len(base);
- last_addr = flst_get_last(base);
-
- buf_ptr_get_fsp_addr(node, &space, &node_addr);
-
- /* If the list is not empty, call flst_insert_after */
- if (len != 0) {
- flst_node_t* last_node;
-
- if (last_addr.page == node_addr.page) {
- last_node = page_align(node) + last_addr.boffset;
- } else {
- fil_space_t* s = fil_space_acquire_silent(space);
- ulint zip_size = s ? s->zip_size() : 0;
- if (s) s->release();
-
- last_node = fut_get_ptr(space, zip_size, last_addr,
- RW_SX_LATCH, mtr);
- }
-
- flst_insert_after(base, last_node, node, mtr);
- } else {
- /* else call flst_add_to_empty */
- flst_add_to_empty(base, node, mtr);
- }
+ ut_ad(base != cur || boffset != coffset);
+ ut_ad(base != add || boffset != aoffset);
+ ut_ad(cur != add || coffset != aoffset);
+ ut_ad(boffset < base->physical_size());
+ ut_ad(coffset < cur->physical_size());
+ ut_ad(aoffset < add->physical_size());
+ ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+ ut_ad(mtr_memo_contains_page_flagged(mtr, cur->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+ ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+
+ fil_addr_t cur_addr= { cur->page.id.page_no(), coffset };
+ fil_addr_t add_addr= { add->page.id.page_no(), aoffset };
+ fil_addr_t next_addr= flst_get_next_addr(cur->frame + coffset);
+
+ flst_write_addr(*add, add->frame + aoffset + FLST_PREV, cur_addr, mtr);
+ flst_write_addr(*add, add->frame + aoffset + FLST_NEXT, next_addr, mtr);
+
+ if (fil_addr_is_null(next_addr))
+ flst_write_addr(*base, base->frame + boffset + FLST_LAST, add_addr, mtr);
+ else
+ {
+ buf_block_t *block;
+ flst_node_t *next= fut_get_ptr(add->page.id.space(), add->zip_size(),
+ next_addr, RW_SX_LATCH, mtr, &block);
+ flst_write_addr(*block, next + FLST_PREV, add_addr, mtr);
+ }
+
+ flst_write_addr(*cur, cur->frame + coffset + FLST_NEXT, add_addr, mtr);
+
+ byte *len= &base->frame[boffset + FLST_LEN];
+ mtr->write<4>(*base, len, mach_read_from_4(len) + 1);
}
-/********************************************************************//**
-Adds a node as the first node in a list. */
-void
-flst_add_first(
-/*===========*/
- flst_base_node_t* base, /*!< in: pointer to base node of list */
- flst_node_t* node, /*!< in: node to add */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+/** Insert a node before another one.
+@param[in,out] base base node block
+@param[in] boffset byte offset of the base node
+@param[in,out] cur insert position block
+@param[in] coffset byte offset of the insert position
+@param[in,out] add block to be added
+@param[in] aoffset byte offset of the block to be added
+@param[in,out] mtr mini-transaction */
+static void flst_insert_before(buf_block_t *base, uint16_t boffset,
+ buf_block_t *cur, uint16_t coffset,
+ buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
- ulint space;
- fil_addr_t node_addr;
- ulint len;
- fil_addr_t first_addr;
- flst_node_t* first_node;
-
- ut_ad(mtr && base && node);
- ut_ad(base != node);
- ut_ad(mtr_memo_contains_page_flagged(mtr, base,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- ut_ad(mtr_memo_contains_page_flagged(mtr, node,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- len = flst_get_len(base);
- first_addr = flst_get_first(base);
-
- buf_ptr_get_fsp_addr(node, &space, &node_addr);
-
- /* If the list is not empty, call flst_insert_before */
- if (len != 0) {
- if (first_addr.page == node_addr.page) {
- first_node = page_align(node) + first_addr.boffset;
- } else {
- fil_space_t* s = fil_space_acquire_silent(space);
- ulint zip_size = s ? s->zip_size() : 0;
- if (s) s->release();
-
- first_node = fut_get_ptr(space, zip_size, first_addr,
- RW_SX_LATCH, mtr);
- }
-
- flst_insert_before(base, node, first_node, mtr);
- } else {
- /* else call flst_add_to_empty */
- flst_add_to_empty(base, node, mtr);
- }
+ ut_ad(base != cur || boffset != coffset);
+ ut_ad(base != add || boffset != aoffset);
+ ut_ad(cur != add || coffset != aoffset);
+ ut_ad(boffset < base->physical_size());
+ ut_ad(coffset < cur->physical_size());
+ ut_ad(aoffset < add->physical_size());
+ ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+ ut_ad(mtr_memo_contains_page_flagged(mtr, cur->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+ ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+
+ fil_addr_t cur_addr= { cur->page.id.page_no(), coffset };
+ fil_addr_t add_addr= { add->page.id.page_no(), aoffset };
+ fil_addr_t prev_addr= flst_get_prev_addr(cur->frame + coffset);
+
+ flst_write_addr(*add, add->frame + aoffset + FLST_PREV, prev_addr, mtr);
+ flst_write_addr(*add, add->frame + aoffset + FLST_NEXT, cur_addr, mtr);
+
+ if (fil_addr_is_null(prev_addr))
+ flst_write_addr(*base, base->frame + boffset + FLST_FIRST, add_addr, mtr);
+ else
+ {
+ buf_block_t *block;
+ flst_node_t *prev= fut_get_ptr(add->page.id.space(), add->zip_size(),
+ prev_addr, RW_SX_LATCH, mtr, &block);
+ flst_write_addr(*block, prev + FLST_NEXT, add_addr, mtr);
+ }
+
+ flst_write_addr(*cur, cur->frame + coffset + FLST_PREV, add_addr, mtr);
+
+ byte *len= &base->frame[boffset + FLST_LEN];
+ mtr->write<4>(*base, len, mach_read_from_4(len) + 1);
}
-/********************************************************************//**
-Inserts a node after another in a list. */
-static
-void
-flst_insert_after(
-/*==============*/
- flst_base_node_t* base, /*!< in: pointer to base node of list */
- flst_node_t* node1, /*!< in: node to insert after */
- flst_node_t* node2, /*!< in: node to add */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+/** Append a file list node to a list.
+@param[in,out] base base node block
+@param[in] boffset byte offset of the base node
+@param[in,out] add block to be added
+@param[in] aoffset byte offset of the node to be added
+@param[in,out] mtr mini-transaction */
+void flst_add_last(buf_block_t *base, uint16_t boffset,
+ buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
- ulint space;
- fil_addr_t node1_addr;
- fil_addr_t node2_addr;
- flst_node_t* node3;
- fil_addr_t node3_addr;
- ulint len;
-
- ut_ad(mtr && node1 && node2 && base);
- ut_ad(base != node1);
- ut_ad(base != node2);
- ut_ad(node2 != node1);
- ut_ad(mtr_memo_contains_page_flagged(mtr, base,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- ut_ad(mtr_memo_contains_page_flagged(mtr, node1,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- ut_ad(mtr_memo_contains_page_flagged(mtr, node2,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
-
- buf_ptr_get_fsp_addr(node1, &space, &node1_addr);
- buf_ptr_get_fsp_addr(node2, &space, &node2_addr);
-
- node3_addr = flst_get_next_addr(node1);
-
- /* Set prev and next fields of node2 */
- flst_write_addr(node2 + FLST_PREV, node1_addr, mtr);
- flst_write_addr(node2 + FLST_NEXT, node3_addr, mtr);
-
- if (!fil_addr_is_null(node3_addr)) {
- /* Update prev field of node3 */
- fil_space_t* s = fil_space_acquire_silent(space);
- ulint zip_size = s ? s->zip_size() : 0;
- if (s) s->release();
-
- node3 = fut_get_ptr(space, zip_size,
- node3_addr, RW_SX_LATCH, mtr);
- flst_write_addr(node3 + FLST_PREV, node2_addr, mtr);
- } else {
- /* node1 was last in list: update last field in base */
- flst_write_addr(base + FLST_LAST, node2_addr, mtr);
- }
-
- /* Set next field of node1 */
- flst_write_addr(node1 + FLST_NEXT, node2_addr, mtr);
-
- /* Update len of base node */
- len = flst_get_len(base);
- mlog_write_ulint(base + FLST_LEN, len + 1, MLOG_4BYTES, mtr);
+ ut_ad(base != add || boffset != aoffset);
+ ut_ad(boffset < base->physical_size());
+ ut_ad(aoffset < add->physical_size());
+ ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+ ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+
+ if (!flst_get_len(base->frame + boffset))
+ flst_add_to_empty(base, boffset, add, aoffset, mtr);
+ else
+ {
+ fil_addr_t addr= flst_get_last(base->frame + boffset);
+ buf_block_t *cur= add;
+ const flst_node_t *c= addr.page == add->page.id.page_no()
+ ? add->frame + addr.boffset
+ : fut_get_ptr(add->page.id.space(), add->zip_size(), addr,
+ RW_SX_LATCH, mtr, &cur);
+ flst_insert_after(base, boffset, cur,
+ static_cast<uint16_t>(c - cur->frame),
+ add, aoffset, mtr);
+ }
}
-/********************************************************************//**
-Inserts a node before another in a list. */
-static
-void
-flst_insert_before(
-/*===============*/
- flst_base_node_t* base, /*!< in: pointer to base node of list */
- flst_node_t* node2, /*!< in: node to insert */
- flst_node_t* node3, /*!< in: node to insert before */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+/** Prepend a file list node to a list.
+@param[in,out] base base node block
+@param[in] boffset byte offset of the base node
+@param[in,out] add block to be added
+@param[in] aoffset byte offset of the node to be added
+@param[in,out] mtr mini-transaction */
+void flst_add_first(buf_block_t *base, uint16_t boffset,
+ buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
{
- ulint space;
- flst_node_t* node1;
- fil_addr_t node1_addr;
- fil_addr_t node2_addr;
- fil_addr_t node3_addr;
- ulint len;
-
- ut_ad(mtr && node2 && node3 && base);
- ut_ad(base != node2);
- ut_ad(base != node3);
- ut_ad(node2 != node3);
- ut_ad(mtr_memo_contains_page_flagged(mtr, base,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- ut_ad(mtr_memo_contains_page_flagged(mtr, node2,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- ut_ad(mtr_memo_contains_page_flagged(mtr, node3,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
-
- buf_ptr_get_fsp_addr(node2, &space, &node2_addr);
- buf_ptr_get_fsp_addr(node3, &space, &node3_addr);
-
- node1_addr = flst_get_prev_addr(node3);
-
- /* Set prev and next fields of node2 */
- flst_write_addr(node2 + FLST_PREV, node1_addr, mtr);
- flst_write_addr(node2 + FLST_NEXT, node3_addr, mtr);
-
- if (!fil_addr_is_null(node1_addr)) {
- fil_space_t* s = fil_space_acquire_silent(space);
- ulint zip_size = s ? s->zip_size() : 0;
- if (s) s->release();
-
- /* Update next field of node1 */
- node1 = fut_get_ptr(space, zip_size, node1_addr,
- RW_SX_LATCH, mtr);
- flst_write_addr(node1 + FLST_NEXT, node2_addr, mtr);
- } else {
- /* node3 was first in list: update first field in base */
- flst_write_addr(base + FLST_FIRST, node2_addr, mtr);
- }
-
- /* Set prev field of node3 */
- flst_write_addr(node3 + FLST_PREV, node2_addr, mtr);
-
- /* Update len of base node */
- len = flst_get_len(base);
- mlog_write_ulint(base + FLST_LEN, len + 1, MLOG_4BYTES, mtr);
+ ut_ad(base != add || boffset != aoffset);
+ ut_ad(boffset < base->physical_size());
+ ut_ad(aoffset < add->physical_size());
+ ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+ ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+
+ if (!flst_get_len(base->frame + boffset))
+ flst_add_to_empty(base, boffset, add, aoffset, mtr);
+ else
+ {
+ fil_addr_t addr= flst_get_first(base->frame + boffset);
+ buf_block_t *cur= add;
+ const flst_node_t *c= addr.page == add->page.id.page_no()
+ ? add->frame + addr.boffset
+ : fut_get_ptr(add->page.id.space(), add->zip_size(), addr,
+ RW_SX_LATCH, mtr, &cur);
+ flst_insert_before(base, boffset, cur,
+ static_cast<uint16_t>(c - cur->frame),
+ add, aoffset, mtr);
+ }
}
-/********************************************************************//**
-Removes a node. */
-void
-flst_remove(
-/*========*/
- flst_base_node_t* base, /*!< in: pointer to base node of list */
- flst_node_t* node2, /*!< in: node to remove */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+/** Remove a file list node.
+@param[in,out] base base node block
+@param[in] boffset byte offset of the base node
+@param[in,out] cur block to be removed
+@param[in] coffset byte offset of the current record to be removed
+@param[in,out] mtr mini-transaction */
+void flst_remove(buf_block_t *base, uint16_t boffset,
+ buf_block_t *cur, uint16_t coffset, mtr_t *mtr)
{
- ulint space;
- flst_node_t* node1;
- fil_addr_t node1_addr;
- fil_addr_t node2_addr;
- flst_node_t* node3;
- fil_addr_t node3_addr;
- ulint len;
-
- ut_ad(mtr && node2 && base);
- ut_ad(mtr_memo_contains_page_flagged(mtr, base,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- ut_ad(mtr_memo_contains_page_flagged(mtr, node2,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
-
- buf_ptr_get_fsp_addr(node2, &space, &node2_addr);
-
- fil_space_t* s = fil_space_acquire_silent(space);
- ulint zip_size = s ? s->zip_size() : 0;
- if (s) s->release();
-
- node1_addr = flst_get_prev_addr(node2);
- node3_addr = flst_get_next_addr(node2);
-
- if (!fil_addr_is_null(node1_addr)) {
-
- /* Update next field of node1 */
-
- if (node1_addr.page == node2_addr.page) {
-
- node1 = page_align(node2) + node1_addr.boffset;
- } else {
- node1 = fut_get_ptr(space, zip_size,
- node1_addr, RW_SX_LATCH, mtr);
- }
-
- ut_ad(node1 != node2);
-
- flst_write_addr(node1 + FLST_NEXT, node3_addr, mtr);
- } else {
- /* node2 was first in list: update first field in base */
- flst_write_addr(base + FLST_FIRST, node3_addr, mtr);
- }
-
- if (!fil_addr_is_null(node3_addr)) {
- /* Update prev field of node3 */
-
- if (node3_addr.page == node2_addr.page) {
-
- node3 = page_align(node2) + node3_addr.boffset;
- } else {
- node3 = fut_get_ptr(space, zip_size,
- node3_addr, RW_SX_LATCH, mtr);
- }
-
- ut_ad(node2 != node3);
-
- flst_write_addr(node3 + FLST_PREV, node1_addr, mtr);
- } else {
- /* node2 was last in list: update last field in base */
- flst_write_addr(base + FLST_LAST, node1_addr, mtr);
- }
-
- /* Update len of base node */
- len = flst_get_len(base);
- ut_ad(len > 0);
-
- mlog_write_ulint(base + FLST_LEN, len - 1, MLOG_4BYTES, mtr);
+ ut_ad(boffset < base->physical_size());
+ ut_ad(coffset < cur->physical_size());
+ ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+ ut_ad(mtr_memo_contains_page_flagged(mtr, cur->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+
+ const fil_addr_t prev_addr= flst_get_prev_addr(cur->frame + coffset);
+ const fil_addr_t next_addr= flst_get_next_addr(cur->frame + coffset);
+
+ if (fil_addr_is_null(prev_addr))
+ flst_write_addr(*base, base->frame + boffset + FLST_FIRST, next_addr, mtr);
+ else
+ {
+ buf_block_t *block= cur;
+ flst_node_t *prev= prev_addr.page == cur->page.id.page_no()
+ ? cur->frame + prev_addr.boffset
+ : fut_get_ptr(cur->page.id.space(), cur->zip_size(), prev_addr,
+ RW_SX_LATCH, mtr, &block);
+ flst_write_addr(*block, prev + FLST_NEXT, next_addr, mtr);
+ }
+
+ if (fil_addr_is_null(next_addr))
+ flst_write_addr(*base, base->frame + boffset + FLST_LAST, prev_addr, mtr);
+ else
+ {
+ buf_block_t *block= cur;
+ flst_node_t *next= next_addr.page == cur->page.id.page_no()
+ ? cur->frame + next_addr.boffset
+ : fut_get_ptr(cur->page.id.space(), cur->zip_size(), next_addr,
+ RW_SX_LATCH, mtr, &block);
+ flst_write_addr(*block, next + FLST_PREV, prev_addr, mtr);
+ }
+
+ byte *len= &base->frame[boffset + FLST_LEN];
+ ut_ad(mach_read_from_4(len) > 0);
+ mtr->write<4>(*base, len, mach_read_from_4(len) - 1);
}
-/********************************************************************//**
-Validates a file-based list.
-@return TRUE if ok */
-ibool
-flst_validate(
-/*==========*/
- const flst_base_node_t* base, /*!< in: pointer to base node of list */
- mtr_t* mtr1) /*!< in: mtr */
+#ifdef UNIV_DEBUG
+/** Validate a file-based list. */
+void flst_validate(const buf_block_t *base, uint16_t boffset, mtr_t *mtr)
{
- ulint space;
- const flst_node_t* node;
- fil_addr_t node_addr;
- fil_addr_t base_addr;
- ulint len;
- ulint i;
- mtr_t mtr2;
-
- ut_ad(base);
- ut_ad(mtr_memo_contains_page_flagged(mtr1, base,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
-
- /* We use two mini-transaction handles: the first is used to
- lock the base node, and prevent other threads from modifying the
- list. The second is used to traverse the list. We cannot run the
- second mtr without committing it at times, because if the list
- is long, then the x-locked pages could fill the buffer resulting
- in a deadlock. */
-
- /* Find out the space id */
- buf_ptr_get_fsp_addr(base, &space, &base_addr);
-
- fil_space_t* s = fil_space_acquire_silent(space);
- ulint zip_size = s ? s->zip_size() : 0;
- if (s) s->release();
-
- len = flst_get_len(base);
- node_addr = flst_get_first(base);
-
- for (i = 0; i < len; i++) {
- mtr_start(&mtr2);
-
- node = fut_get_ptr(space, zip_size,
- node_addr, RW_SX_LATCH, &mtr2);
- node_addr = flst_get_next_addr(node);
-
- mtr_commit(&mtr2); /* Commit mtr2 each round to prevent buffer
- becoming full */
- }
-
- ut_a(fil_addr_is_null(node_addr));
-
- node_addr = flst_get_last(base);
-
- for (i = 0; i < len; i++) {
- mtr_start(&mtr2);
-
- node = fut_get_ptr(space, zip_size,
- node_addr, RW_SX_LATCH, &mtr2);
- node_addr = flst_get_prev_addr(node);
-
- mtr_commit(&mtr2); /* Commit mtr2 each round to prevent buffer
- becoming full */
- }
-
- ut_a(fil_addr_is_null(node_addr));
-
- return(TRUE);
+ ut_ad(boffset < base->physical_size());
+ ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
+ MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+
+ /* We use two mini-transaction handles: the first is used to lock
+ the base node, and prevent other threads from modifying the list.
+ The second is used to traverse the list. We cannot run the second
+ mtr without committing it at times, because if the list is long,
+ the x-locked pages could fill the buffer, resulting in a deadlock. */
+ mtr_t mtr2;
+
+ const uint32_t len= flst_get_len(base->frame + boffset);
+ fil_addr_t addr= flst_get_first(base->frame + boffset);
+
+ for (uint32_t i= len; i--; )
+ {
+ mtr2.start();
+ const flst_node_t *node= fut_get_ptr(base->page.id.space(),
+ base->zip_size(), addr,
+ RW_SX_LATCH, &mtr2);
+ addr= flst_get_next_addr(node);
+ mtr2.commit();
+ }
+
+ ut_ad(fil_addr_is_null(addr));
+
+ addr= flst_get_last(base->frame + boffset);
+
+ for (uint32_t i= len; i--; )
+ {
+ mtr2.start();
+ const flst_node_t *node= fut_get_ptr(base->page.id.space(),
+ base->zip_size(), addr,
+ RW_SX_LATCH, &mtr2);
+ addr= flst_get_prev_addr(node);
+ mtr2.commit();
+ }
+
+ ut_ad(fil_addr_is_null(addr));
}
+#endif
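(Sketch, not part of the patch.) The refactored flst_* functions above identify a list node by a buf_block_t plus a byte offset instead of a raw frame pointer. A minimal illustration of the calling convention, modelled on the change-buffer free-list call sites later in this patch; the wrapper name and the latching setup are assumed:

static void example_free_list_ops(buf_block_t *root, buf_block_t *node,
                                  mtr_t *mtr)
{
  /* Both blocks are assumed to be X- or SX-latched in mtr. */
  flst_add_last(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
                node, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, mtr);
  ut_ad(flst_get_len(root->frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST));
  flst_remove(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
              node, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, mtr);
}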
diff --git a/storage/innobase/gis/gis0rtree.cc b/storage/innobase/gis/gis0rtree.cc
index b609fc7c90e..9dd50fb3cf1 100644
--- a/storage/innobase/gis/gis0rtree.cc
+++ b/storage/innobase/gis/gis0rtree.cc
@@ -628,12 +628,8 @@ rtr_adjust_upper_level(
rtr_mbr_t* new_mbr, /*!< in: MBR on the new page */
mtr_t* mtr) /*!< in: mtr */
{
- page_t* page;
- page_t* new_page;
ulint page_no;
ulint new_page_no;
- page_zip_des_t* page_zip;
- page_zip_des_t* new_page_zip;
dict_index_t* index = sea_cur->index;
btr_cur_t cursor;
ulint* offsets;
@@ -657,13 +653,9 @@ rtr_adjust_upper_level(
level = btr_page_get_level(buf_block_get_frame(block));
ut_ad(level == btr_page_get_level(buf_block_get_frame(new_block)));
- page = buf_block_get_frame(block);
page_no = block->page.id.page_no();
- page_zip = buf_block_get_page_zip(block);
- new_page = buf_block_get_frame(new_block);
new_page_no = new_block->page.id.page_no();
- new_page_zip = buf_block_get_page_zip(new_block);
/* Set new mbr for the old page on the upper level. */
/* Look up the index for the node pointer to page */
@@ -672,7 +664,8 @@ rtr_adjust_upper_level(
page_cursor = btr_cur_get_page_cur(&cursor);
- rtr_update_mbr_field(&cursor, offsets, NULL, page, mbr, NULL, mtr);
+ rtr_update_mbr_field(&cursor, offsets, NULL, block->frame, mbr, NULL,
+ mtr);
/* Already updated parent MBR, reset in our path */
if (sea_cur->rtr_info) {
@@ -686,7 +679,7 @@ rtr_adjust_upper_level(
/* Insert the node for the new page. */
node_ptr_upper = rtr_index_build_node_ptr(
index, new_mbr,
- page_rec_get_next(page_get_infimum_rec(new_page)),
+ page_rec_get_next(page_get_infimum_rec(new_block->frame)),
new_page_no, heap);
ulint up_match = 0;
@@ -742,26 +735,25 @@ rtr_adjust_upper_level(
ut_ad(block->zip_size() == index->table->space->zip_size());
- const uint32_t next_page_no = btr_page_get_next(page);
+ const uint32_t next_page_no = btr_page_get_next(block->frame);
if (next_page_no != FIL_NULL) {
buf_block_t* next_block = btr_block_get(
*index, next_page_no, RW_X_LATCH, false, mtr);
#ifdef UNIV_BTR_DEBUG
- ut_a(page_is_comp(next_block->frame) == page_is_comp(page));
+ ut_a(page_is_comp(next_block->frame)
+ == page_is_comp(block->frame));
ut_a(btr_page_get_prev(next_block->frame)
== block->page.id.page_no());
#endif /* UNIV_BTR_DEBUG */
- btr_page_set_prev(buf_block_get_frame(next_block),
- buf_block_get_page_zip(next_block),
- new_page_no, mtr);
+ btr_page_set_prev(next_block, new_page_no, mtr);
}
- btr_page_set_next(page, page_zip, new_page_no, mtr);
+ btr_page_set_next(block, new_page_no, mtr);
- btr_page_set_prev(new_page, new_page_zip, page_no, mtr);
- btr_page_set_next(new_page, new_page_zip, next_page_no, mtr);
+ btr_page_set_prev(new_block, page_no, mtr);
+ btr_page_set_next(new_block, next_page_no, mtr);
}
/*************************************************************//**
@@ -848,11 +840,8 @@ rtr_split_page_move_rec_list(
ut_ad(!is_leaf || cur_split_node->key != first_rec);
rec = page_cur_insert_rec_low(
- page_cur_get_rec(&new_page_cursor),
- index,
- cur_split_node->key,
- offsets,
- mtr);
+ page_cur_get_rec(&new_page_cursor),
+ index, cur_split_node->key, offsets, mtr);
ut_a(rec);
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 23691a2bd4a..4e2dd00a37a 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -17856,14 +17856,10 @@ func_exit:
space->zip_size(), RW_X_LATCH, &mtr);
if (block != NULL) {
- byte* page = block->frame;
-
- ib::info() << "Dirtying page: " << page_id_t(
- page_get_space_id(page), page_get_page_no(page));
-
- mlog_write_ulint(page + FIL_PAGE_TYPE,
- fil_page_get_type(page),
- MLOG_2BYTES, &mtr);
+ ib::info() << "Dirtying page: " << block->page.id;
+ mtr.write<1,mtr_t::FORCED>(*block,
+ block->frame + FIL_PAGE_SPACE_ID,
+ block->frame[FIL_PAGE_SPACE_ID]);
}
mtr.commit();
goto func_exit;
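The FORCED write above intentionally stores a byte's current value back into the page. With the mtr_t::write() template introduced later in this patch, an OPT write of an unchanged value is optimized away and a NORMAL write of an unchanged value trips a debug assertion, so FORCED is the variant that still emits the redo record needed to mark the page dirty. A condensed sketch of the difference (not part of the patch), assuming blk is a buf_block_t* already X-latched in mtr:

  byte *ptr = blk->frame + FIL_PAGE_SPACE_ID;
  mtr.write<1,mtr_t::OPT>(*blk, ptr, *ptr);    /* optimized away: no change */
  mtr.write<1,mtr_t::FORCED>(*blk, ptr, *ptr); /* logged anyway; page dirtied */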
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index 786a176fbb2..146b08f2b82 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -10198,12 +10198,12 @@ commit_cache_norebuild(
space->zip_size(),
RW_X_LATCH, &mtr)) {
mtr.set_named_space(space);
- mlog_write_ulint(
+ mtr.write<4,mtr_t::OPT>(
+ *b,
FSP_HEADER_OFFSET
+ FSP_SPACE_FLAGS + b->frame,
space->flags
- & ~FSP_FLAGS_MEM_MASK,
- MLOG_4BYTES, &mtr);
+ & ~FSP_FLAGS_MEM_MASK);
}
mtr.commit();
}
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index a9f9b14c696..45234276705 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -332,17 +332,12 @@ ibuf_header_page_get(
return page;
}
-/******************************************************************//**
-Gets the root page and sx-latches it.
-@return insert buffer tree root page */
-static
-page_t*
-ibuf_tree_root_get(
-/*===============*/
- mtr_t* mtr) /*!< in: mtr */
+/** Acquire the change buffer root page.
+@param[in,out] mtr mini-transaction
+@return change buffer root page, SX-latched */
+static buf_block_t *ibuf_tree_root_get(mtr_t *mtr)
{
buf_block_t* block;
- page_t* root;
ut_ad(ibuf_inside(mtr));
ut_ad(mutex_own(&ibuf_mutex));
@@ -356,13 +351,11 @@ ibuf_tree_root_get(
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW);
- root = buf_block_get_frame(block);
-
- ut_ad(page_get_space_id(root) == IBUF_SPACE_ID);
- ut_ad(page_get_page_no(root) == FSP_IBUF_TREE_ROOT_PAGE_NO);
- ut_ad(ibuf.empty == page_is_empty(root));
+ ut_ad(page_get_space_id(block->frame) == IBUF_SPACE_ID);
+ ut_ad(page_get_page_no(block->frame) == FSP_IBUF_TREE_ROOT_PAGE_NO);
+ ut_ad(ibuf.empty == page_is_empty(block->frame));
- return(root);
+ return block;
}
/******************************************************************//**
@@ -624,29 +617,27 @@ ibuf_bitmap_page_get_bits_low(
}
/** Sets the desired bit for a given page in a bitmap page.
-@param[in,out] page bitmap page
+@tparam bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
+@param[in,out] block bitmap page
@param[in] page_id page id whose bits to set
@param[in] physical_size page size
-@param[in] bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
@param[in] val value to set
@param[in,out] mtr mtr containing an x-latch to the bitmap page */
-static
-void
+template<ulint bit>
+static void
ibuf_bitmap_page_set_bits(
- page_t* page,
+ buf_block_t* block,
const page_id_t page_id,
ulint physical_size,
- ulint bit,
ulint val,
mtr_t* mtr)
{
ulint byte_offset;
ulint bit_offset;
- ulint map_byte;
- ut_ad(bit < IBUF_BITS_PER_PAGE);
+ static_assert(bit < IBUF_BITS_PER_PAGE, "wrong bit");
compile_time_assert(!(IBUF_BITS_PER_PAGE % 2));
- ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX));
+ ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->is_named_space(page_id.space()));
bit_offset = (page_id.page_no() % physical_size)
@@ -657,21 +648,22 @@ ibuf_bitmap_page_set_bits(
ut_ad(byte_offset + IBUF_BITMAP < srv_page_size);
- map_byte = mach_read_from_1(page + IBUF_BITMAP + byte_offset);
+ byte* map_byte = &block->frame[IBUF_BITMAP + byte_offset];
+ byte b = *map_byte;
if (bit == IBUF_BITMAP_FREE) {
ut_ad(bit_offset + 1 < 8);
ut_ad(val <= 3);
-
- map_byte = ut_bit_set_nth(map_byte, bit_offset, val / 2);
- map_byte = ut_bit_set_nth(map_byte, bit_offset + 1, val % 2);
+ b &= ~(3U << bit_offset);
+ b |= (val & 2) << (bit_offset - 1)
+ | (val & 1) << (bit_offset + 1);
} else {
ut_ad(val <= 1);
- map_byte = ut_bit_set_nth(map_byte, bit_offset, val);
+ b &= ~(1U << bit_offset);
+ b |= val << bit_offset;
}
- mlog_write_ulint(page + IBUF_BITMAP + byte_offset, map_byte,
- MLOG_1BYTE, mtr);
+ mtr->write<1,mtr_t::OPT>(*block, map_byte, b);
}
/** Calculates the bitmap page number for a given page number.
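As a cross-check of the open-coded bit operations in ibuf_bitmap_page_set_bits() above: for IBUF_BITMAP_FREE the two-bit value lands with its high bit at bit_offset and its low bit at bit_offset + 1, which is exactly what the removed ut_bit_set_nth(map_byte, bit_offset, val / 2) and ut_bit_set_nth(map_byte, bit_offset + 1, val % 2) calls did. A standalone sketch verifying the equivalence (the values are assumed for illustration, not taken from the patch):

#include <cassert>

int main()
{
  const unsigned bit_offset = 2;          /* assumed position in the byte */
  for (unsigned val = 0; val <= 3; val++)
  {
    unsigned b = 0xff;                    /* arbitrary pre-existing bitmap byte */
    b &= ~(3U << bit_offset);             /* clear the two IBUF_BITMAP_FREE bits */
    b |= (val & 2) << (bit_offset - 1)    /* val / 2 goes to bit_offset */
       | (val & 1) << (bit_offset + 1);   /* val % 2 goes to bit_offset + 1 */
    const unsigned old_style = (0xffU & ~(3U << bit_offset))
      | (val / 2) << bit_offset
      | (val % 2) << (bit_offset + 1);
    assert(b == old_style);
  }
  return 0;
}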
@@ -697,7 +689,7 @@ stored.
page containing the descriptor bits for the file page; the bitmap page
is x-latched */
static
-page_t*
+buf_block_t*
ibuf_bitmap_get_map_page_func(
const page_id_t page_id,
ulint zip_size,
@@ -718,8 +710,7 @@ ibuf_bitmap_get_map_page_func(
buf_block_dbg_add_level(block, SYNC_IBUF_BITMAP);
-
- return(buf_block_get_frame(block));
+ return block;
}
/** Gets the ibuf bitmap page where the bits describing a given file page are
@@ -749,31 +740,19 @@ ibuf_set_free_bits_low(
ulint val, /*!< in: value to set: < 4 */
mtr_t* mtr) /*!< in/out: mtr */
{
- page_t* bitmap_page;
- buf_frame_t* frame;
-
ut_ad(mtr->is_named_space(block->page.id.space()));
-
- if (!block) {
+ if (!page_is_leaf(block->frame)) {
return;
}
- frame = buf_block_get_frame(block);
-
- if (!frame || !page_is_leaf(frame)) {
- return;
- }
-
- bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
- block->zip_size(), mtr);
-
#ifdef UNIV_IBUF_DEBUG
ut_a(val <= ibuf_index_page_calc_free(block));
#endif /* UNIV_IBUF_DEBUG */
- ibuf_bitmap_page_set_bits(
- bitmap_page, block->page.id, block->physical_size(),
- IBUF_BITMAP_FREE, val, mtr);
+ ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(
+ ibuf_bitmap_get_map_page(block->page.id, block->zip_size(),
+ mtr),
+ block->page.id, block->physical_size(), val, mtr);
}
/************************************************************************//**
@@ -793,34 +772,21 @@ ibuf_set_free_bits_func(
#endif /* UNIV_IBUF_DEBUG */
ulint val) /*!< in: value to set: < 4 */
{
- mtr_t mtr;
- page_t* page;
- page_t* bitmap_page;
-
- page = buf_block_get_frame(block);
-
- if (!page_is_leaf(page)) {
-
+ if (!page_is_leaf(block->frame)) {
return;
}
- mtr_start(&mtr);
+ mtr_t mtr;
+ mtr.start();
const fil_space_t* space = mtr.set_named_space_id(
block->page.id.space());
- bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
- block->zip_size(), &mtr);
+ buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
+ block->zip_size(),
+ &mtr);
- switch (space->purpose) {
- case FIL_TYPE_LOG:
- ut_ad(0);
- break;
- case FIL_TYPE_TABLESPACE:
- break;
- /* fall through */
- case FIL_TYPE_TEMPORARY:
- case FIL_TYPE_IMPORT:
- mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
+ if (space->purpose != FIL_TYPE_TABLESPACE) {
+ mtr.set_log_mode(MTR_LOG_NO_REDO);
}
#ifdef UNIV_IBUF_DEBUG
@@ -830,31 +796,17 @@ ibuf_set_free_bits_func(
old_val = ibuf_bitmap_page_get_bits(
bitmap_page, block->page.id,
IBUF_BITMAP_FREE, &mtr);
-# if 0
- if (old_val != max_val) {
- fprintf(stderr,
- "Ibuf: page %lu old val %lu max val %lu\n",
- page_get_page_no(page),
- old_val, max_val);
- }
-# endif
-
ut_a(old_val <= max_val);
}
-# if 0
- fprintf(stderr, "Setting page no %lu free bits to %lu should be %lu\n",
- page_get_page_no(page), val,
- ibuf_index_page_calc_free(block));
-# endif
ut_a(val <= ibuf_index_page_calc_free(block));
#endif /* UNIV_IBUF_DEBUG */
- ibuf_bitmap_page_set_bits(
+ ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(
bitmap_page, block->page.id, block->physical_size(),
- IBUF_BITMAP_FREE, val, &mtr);
+ val, &mtr);
- mtr_commit(&mtr);
+ mtr.commit();
}
/************************************************************************//**
@@ -929,19 +881,10 @@ ibuf_update_free_bits_zip(
buf_block_t* block, /*!< in/out: index page */
mtr_t* mtr) /*!< in/out: mtr */
{
- page_t* bitmap_page;
- ulint after;
-
- ut_a(block);
- buf_frame_t* frame = buf_block_get_frame(block);
- ut_a(frame);
- ut_a(page_is_leaf(frame));
- ut_a(block->zip_size());
+ ut_ad(page_is_leaf(block->frame));
+ ut_ad(block->zip_size());
- bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
- block->zip_size(), mtr);
-
- after = ibuf_index_page_calc_free_zip(block);
+ ulint after = ibuf_index_page_calc_free_zip(block);
if (after == 0) {
/* We move the page to the front of the buffer pool LRU list:
@@ -952,9 +895,10 @@ ibuf_update_free_bits_zip(
buf_page_make_young(&block->page);
}
- ibuf_bitmap_page_set_bits(
- bitmap_page, block->page.id, block->physical_size(),
- IBUF_BITMAP_FREE, after, mtr);
+ ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(
+ ibuf_bitmap_get_map_page(block->page.id, block->zip_size(),
+ mtr),
+ block->page.id, block->physical_size(), after, mtr);
}
/**********************************************************************//**
@@ -1029,7 +973,6 @@ ibuf_page_low(
{
ibool ret;
mtr_t local_mtr;
- page_t* bitmap_page;
ut_ad(!recv_no_ibuf_operations);
ut_ad(x_latch || mtr == NULL);
@@ -1064,10 +1007,8 @@ ibuf_page_low(
zip_size, RW_NO_LATCH, NULL, BUF_GET_NO_LATCH,
file, line, &local_mtr, &err);
- bitmap_page = buf_block_get_frame(block);
-
ret = ibuf_bitmap_page_get_bits_low(
- bitmap_page, page_id, zip_size,
+ block->frame, page_id, zip_size,
MTR_MEMO_BUF_FIX, &local_mtr, IBUF_BITMAP_IBUF);
mtr_commit(&local_mtr);
@@ -1080,10 +1021,10 @@ ibuf_page_low(
mtr_start(mtr);
}
- bitmap_page = ibuf_bitmap_get_map_page_func(page_id, zip_size,
- file, line, mtr);
-
- ret = ibuf_bitmap_page_get_bits(bitmap_page, page_id, zip_size,
+ ret = ibuf_bitmap_page_get_bits(ibuf_bitmap_get_map_page_func(
+ page_id, zip_size, file, line,
+ mtr)->frame,
+ page_id, zip_size,
IBUF_BITMAP_IBUF, mtr);
if (mtr == &local_mtr) {
@@ -1891,23 +1832,16 @@ ibuf_data_too_much_free(void)
return(ibuf.free_list_len >= 3 + (ibuf.size / 2) + 3 * ibuf.height);
}
-/*********************************************************************//**
-Allocates a new page from the ibuf file segment and adds it to the free
-list.
-@return TRUE on success, FALSE if no space left */
-static
-ibool
-ibuf_add_free_page(void)
-/*====================*/
+/** Allocate a change buffer page.
+@retval true on success
+@retval false if no space left */
+static bool ibuf_add_free_page()
{
mtr_t mtr;
page_t* header_page;
buf_block_t* block;
- page_t* page;
- page_t* root;
- page_t* bitmap_page;
- mtr_start(&mtr);
+ mtr.start();
/* Acquire the fsp latch before the ibuf header, obeying the latching
order */
mtr_x_lock_space(fil_system.sys_space, &mtr);
@@ -1928,26 +1862,24 @@ ibuf_add_free_page(void)
&mtr);
if (block == NULL) {
- mtr_commit(&mtr);
-
- return(FALSE);
+ mtr.commit();
+ return false;
}
ut_ad(rw_lock_get_x_lock_count(&block->lock) == 1);
ibuf_enter(&mtr);
mutex_enter(&ibuf_mutex);
- root = ibuf_tree_root_get(&mtr);
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW);
- page = buf_block_get_frame(block);
- mlog_write_ulint(page + FIL_PAGE_TYPE, FIL_PAGE_IBUF_FREE_LIST,
- MLOG_2BYTES, &mtr);
+ mtr.write<2>(*block, block->frame + FIL_PAGE_TYPE,
+ FIL_PAGE_IBUF_FREE_LIST);
/* Add the page to the free list and update the ibuf size data */
- flst_add_last(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
- page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
+ flst_add_last(ibuf_tree_root_get(&mtr),
+ PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
+ block, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
ibuf.seg_size++;
ibuf.free_list_len++;
@@ -1955,17 +1887,18 @@ ibuf_add_free_page(void)
/* Set the bit indicating that this page is now an ibuf tree page
(level 2 page) */
- const page_id_t page_id(IBUF_SPACE_ID, block->page.id.page_no());
- bitmap_page = ibuf_bitmap_get_map_page(page_id, 0, &mtr);
+ const page_id_t page_id(IBUF_SPACE_ID, block->page.id.page_no());
+ buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(page_id, 0, &mtr);
mutex_exit(&ibuf_mutex);
- ibuf_bitmap_page_set_bits(bitmap_page, page_id, srv_page_size,
- IBUF_BITMAP_IBUF, TRUE, &mtr);
+ ibuf_bitmap_page_set_bits<IBUF_BITMAP_IBUF>(bitmap_page, page_id,
+ srv_page_size, true,
+ &mtr);
ibuf_mtr_commit(&mtr);
- return(TRUE);
+ return true;
}
/*********************************************************************//**
@@ -1979,9 +1912,6 @@ ibuf_remove_free_page(void)
mtr_t mtr2;
page_t* header_page;
ulint page_no;
- page_t* page;
- page_t* root;
- page_t* bitmap_page;
log_free_check();
@@ -2009,12 +1939,12 @@ ibuf_remove_free_page(void)
ibuf_mtr_start(&mtr2);
- root = ibuf_tree_root_get(&mtr2);
+ buf_block_t* root = ibuf_tree_root_get(&mtr2);
mutex_exit(&ibuf_mutex);
page_no = flst_get_last(PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST
- + root).page;
+ + root->frame).page;
/* NOTE that we must release the latch on the ibuf tree root
because in fseg_free_page we access level 1 pages, and the root
@@ -2044,22 +1974,15 @@ ibuf_remove_free_page(void)
root = ibuf_tree_root_get(&mtr);
ut_ad(page_no == flst_get_last(PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST
- + root).page);
-
- {
- buf_block_t* block;
-
- block = buf_page_get(page_id, 0, RW_X_LATCH, &mtr);
+ + root->frame).page);
- buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE);
-
- page = buf_block_get_frame(block);
- }
+ buf_block_t* block = buf_page_get(page_id, 0, RW_X_LATCH, &mtr);
+ buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE);
/* Remove the page from the free list and update the ibuf size data */
- flst_remove(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
- page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
+ flst_remove(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
+ block, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
mutex_exit(&ibuf_pessimistic_insert_mutex);
@@ -2069,13 +1992,12 @@ ibuf_remove_free_page(void)
/* Set the bit indicating that this page is no more an ibuf tree page
(level 2 page) */
- bitmap_page = ibuf_bitmap_get_map_page(page_id, 0, &mtr);
+ buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(page_id, 0, &mtr);
mutex_exit(&ibuf_mutex);
- ibuf_bitmap_page_set_bits(
- bitmap_page, page_id, srv_page_size,
- IBUF_BITMAP_IBUF, FALSE, &mtr);
+ ibuf_bitmap_page_set_bits<IBUF_BITMAP_IBUF>(
+ bitmap_page, page_id, srv_page_size, false, &mtr);
ut_d(buf_page_set_file_page_was_freed(page_id));
@@ -3305,8 +3227,7 @@ ibuf_insert_low(
ulint buffered;
lint min_n_recs;
rec_t* ins_rec;
- ibool old_bit_value;
- page_t* bitmap_page;
+ buf_block_t* bitmap_page;
buf_block_t* block;
page_t* root;
dberr_t err;
@@ -3459,8 +3380,8 @@ fail_exit:
if (op == IBUF_OP_INSERT) {
ulint bits = ibuf_bitmap_page_get_bits(
- bitmap_page, page_id, physical_size, IBUF_BITMAP_FREE,
- &bitmap_mtr);
+ bitmap_page->frame, page_id, physical_size,
+ IBUF_BITMAP_FREE, &bitmap_mtr);
if (buffered + entry_size + page_dir_calc_reserved_space(1)
> ibuf_index_page_calc_free_from_bits(physical_size,
@@ -3505,17 +3426,8 @@ fail_exit:
/* Set the bitmap bit denoting that the insert buffer contains
buffered entries for this index page, if the bit is not set yet */
-
- old_bit_value = ibuf_bitmap_page_get_bits(
- bitmap_page, page_id, physical_size,
- IBUF_BITMAP_BUFFERED, &bitmap_mtr);
-
- if (!old_bit_value) {
- ibuf_bitmap_page_set_bits(bitmap_page, page_id, physical_size,
- IBUF_BITMAP_BUFFERED, TRUE,
- &bitmap_mtr);
- }
-
+ ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>(
+ bitmap_page, page_id, physical_size, true, &bitmap_mtr);
ibuf_mtr_commit(&bitmap_mtr);
cursor = btr_pcur_get_btr_cur(&pcur);
@@ -3548,7 +3460,7 @@ fail_exit:
which would cause the sx-latching of the root after that to
break the latching order. */
- root = ibuf_tree_root_get(&mtr);
+ root = ibuf_tree_root_get(&mtr)->frame;
err = btr_cur_optimistic_insert(
BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG,
@@ -3760,9 +3672,6 @@ ibuf_insert_to_index_page_low(
page_cur_t* page_cur)/*!< in/out: cursor positioned on the record
after which to insert the buffered entry */
{
- const page_t* page;
- const page_t* bitmap_page;
- ulint old_bits;
rec_t* rec;
DBUG_ENTER("ibuf_insert_to_index_page_low");
@@ -3790,11 +3699,10 @@ ibuf_insert_to_index_page_low(
DBUG_RETURN(rec);
}
- page = buf_block_get_frame(block);
-
ib::error() << "Insert buffer insert fails; page free "
- << page_get_max_insert_size(page, 1) << ", dtuple size "
- << rec_get_converted_size(index, entry, 0);
+ << page_get_max_insert_size(block->frame, 1)
+ << ", dtuple size "
+ << rec_get_converted_size(index, entry, 0);
fputs("InnoDB: Cannot insert index record ", stderr);
dtuple_print(stderr, entry);
@@ -3802,14 +3710,14 @@ ibuf_insert_to_index_page_low(
"InnoDB: is now probably corrupt. Please run CHECK TABLE on\n"
"InnoDB: that table.\n", stderr);
- bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
- block->zip_size(), mtr);
- old_bits = ibuf_bitmap_page_get_bits(
- bitmap_page, block->page.id, block->zip_size(),
- IBUF_BITMAP_FREE, mtr);
-
ib::error() << "page " << block->page.id << ", size "
- << block->physical_size() << ", bitmap bits " << old_bits;
+ << block->physical_size() << ", bitmap bits "
+ << ibuf_bitmap_page_get_bits(
+ ibuf_bitmap_get_map_page(block->page.id,
+ block->zip_size(),
+ mtr)->frame,
+ block->page.id, block->zip_size(),
+ IBUF_BITMAP_FREE, mtr);
ib::error() << BUG_REPORT_MSG;
@@ -3898,7 +3806,6 @@ dump:
if (UNIV_UNLIKELY(low_match == dtuple_get_n_fields(entry))) {
upd_t* update;
- page_zip_des_t* page_zip;
rec = page_cur_get_rec(&page_cur);
@@ -3910,8 +3817,7 @@ dump:
ULINT_UNDEFINED, &heap);
update = row_upd_build_sec_rec_difference_binary(
rec, index, offsets, entry, heap);
-
- page_zip = buf_block_get_page_zip(block);
+ page_zip_des_t* page_zip = buf_block_get_page_zip(block);
if (update->n_fields == 0) {
/* The records only differ in the delete-mark.
@@ -4023,7 +3929,7 @@ ibuf_set_del_mark(
if (low_match == dtuple_get_n_fields(entry)) {
rec_t* rec;
- page_zip_des_t* page_zip;
+ page_zip_des_t* page_zip;
rec = page_cur_get_rec(&page_cur);
page_zip = page_cur_get_page_zip(&page_cur);
@@ -4272,7 +4178,7 @@ bool ibuf_delete_rec(ulint space, ulint page_no, btr_pcur_t* pcur,
goto func_exit;
}
- root = ibuf_tree_root_get(mtr);
+ root = ibuf_tree_root_get(mtr)->frame;
btr_cur_pessimistic_delete(&err, TRUE, btr_pcur_get_btr_cur(pcur), 0,
false, mtr);
@@ -4317,10 +4223,10 @@ bool ibuf_page_exists(const buf_page_t& bpage)
bool bitmap_bits = false;
ibuf_mtr_start(&mtr);
- if (const page_t* bitmap_page = ibuf_bitmap_get_map_page(
+ if (const buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(
bpage.id, bpage.zip_size(), &mtr)) {
bitmap_bits = ibuf_bitmap_page_get_bits(
- bitmap_page, bpage.id, bpage.zip_size(),
+ bitmap_page->frame, bpage.id, bpage.zip_size(),
IBUF_BITMAP_BUFFERED, &mtr) != 0;
}
ibuf_mtr_commit(&mtr);
@@ -4351,7 +4257,6 @@ ibuf_merge_or_delete_for_page(
#ifdef UNIV_IBUF_DEBUG
ulint volume = 0;
#endif /* UNIV_IBUF_DEBUG */
- page_zip_des_t* page_zip = NULL;
bool corruption_noticed = false;
mtr_t mtr;
@@ -4385,18 +4290,18 @@ ibuf_merge_or_delete_for_page(
block = NULL;
update_ibuf_bitmap = false;
} else {
- page_t* bitmap_page = NULL;
ulint bitmap_bits = 0;
ibuf_mtr_start(&mtr);
- bitmap_page = ibuf_bitmap_get_map_page(
+ buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(
page_id, zip_size, &mtr);
- if (bitmap_page &&
- fil_page_get_type(bitmap_page) != FIL_PAGE_TYPE_ALLOCATED) {
+ if (bitmap_page
+ && fil_page_get_type(bitmap_page->frame)
+ != FIL_PAGE_TYPE_ALLOCATED) {
bitmap_bits = ibuf_bitmap_page_get_bits(
- bitmap_page, page_id, zip_size,
+ bitmap_page->frame, page_id, zip_size,
IBUF_BITMAP_BUFFERED, &mtr);
}
@@ -4429,7 +4334,6 @@ ibuf_merge_or_delete_for_page(
the debug checks. */
rw_lock_x_lock_move_ownership(&(block->lock));
- page_zip = buf_block_get_page_zip(block);
if (!fil_page_index_page_check(block->frame)
|| !page_is_leaf(block->frame)) {
@@ -4496,8 +4400,7 @@ loop:
|| ibuf_rec_get_space(&mtr, rec) != page_id.space()) {
if (block != NULL) {
- page_header_reset_last_insert(
- block->frame, page_zip, &mtr);
+ page_header_reset_last_insert(block, &mtr);
}
goto reset_bit;
@@ -4519,8 +4422,9 @@ loop:
ibuf_op_t op = ibuf_rec_get_op_type(&mtr, rec);
max_trx_id = page_get_max_trx_id(page_align(rec));
- page_update_max_trx_id(block, page_zip, max_trx_id,
- &mtr);
+ page_update_max_trx_id(block,
+ buf_block_get_page_zip(block),
+ max_trx_id, &mtr);
ut_ad(page_validate(page_align(rec), ibuf.index));
@@ -4635,28 +4539,17 @@ loop:
}
reset_bit:
- if (update_ibuf_bitmap) {
- page_t* bitmap_page;
-
- bitmap_page = ibuf_bitmap_get_map_page(page_id, zip_size,
- &mtr);
-
- ibuf_bitmap_page_set_bits(
- bitmap_page, page_id, physical_size,
- IBUF_BITMAP_BUFFERED, FALSE, &mtr);
+ if (!update_ibuf_bitmap) {
+ } else if (buf_block_t* bitmap = ibuf_bitmap_get_map_page(
+ page_id, zip_size, &mtr)) {
+ /* FIXME: update the bitmap byte only once! */
+ ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>(
+ bitmap, page_id, physical_size, false, &mtr);
if (block != NULL) {
- ulint old_bits = ibuf_bitmap_page_get_bits(
- bitmap_page, page_id, zip_size,
- IBUF_BITMAP_FREE, &mtr);
-
- ulint new_bits = ibuf_index_page_calc_free(block);
-
- if (old_bits != new_bits) {
- ibuf_bitmap_page_set_bits(
- bitmap_page, page_id, physical_size,
- IBUF_BITMAP_FREE, new_bits, &mtr);
- }
+ ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(
+ bitmap, page_id, physical_size,
+ ibuf_index_page_calc_free(block), &mtr);
}
}
@@ -4760,18 +4653,15 @@ bool
ibuf_is_empty(void)
/*===============*/
{
- bool is_empty;
- const page_t* root;
mtr_t mtr;
ibuf_mtr_start(&mtr);
- mutex_enter(&ibuf_mutex);
- root = ibuf_tree_root_get(&mtr);
- mutex_exit(&ibuf_mutex);
-
- is_empty = page_is_empty(root);
+ ut_d(mutex_enter(&ibuf_mutex));
+ const buf_block_t* root = ibuf_tree_root_get(&mtr);
+ bool is_empty = page_is_empty(root->frame);
ut_a(is_empty == ibuf.empty);
+ ut_d(mutex_exit(&ibuf_mutex));
ibuf_mtr_commit(&mtr);
return(is_empty);
@@ -4849,9 +4739,6 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
the space, as usual. */
for (page_no = 0; page_no < size; page_no += physical_size) {
- page_t* bitmap_page;
- ulint i;
-
if (trx_is_interrupted(trx)) {
mutex_exit(&ibuf_mutex);
return(DB_INTERRUPTED);
@@ -4863,10 +4750,15 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
ibuf_enter(&mtr);
- bitmap_page = ibuf_bitmap_get_map_page(
+ buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(
page_id_t(space->id, page_no), zip_size, &mtr);
+ if (!bitmap_page) {
+ mutex_exit(&ibuf_mutex);
+ mtr.commit();
+ return DB_CORRUPTION;
+ }
- if (buf_page_is_zeroes(bitmap_page, physical_size)) {
+ if (buf_page_is_zeroes(bitmap_page->frame, physical_size)) {
/* This means we got all-zero page instead of
ibuf bitmap page. The subsequent page should be
all-zero pages. */
@@ -4886,17 +4778,13 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
continue;
}
- if (!bitmap_page) {
- mutex_exit(&ibuf_mutex);
- return DB_CORRUPTION;
- }
-
- for (i = FSP_IBUF_BITMAP_OFFSET + 1; i < physical_size; i++) {
+ for (ulint i = FSP_IBUF_BITMAP_OFFSET + 1; i < physical_size;
+ i++) {
const ulint offset = page_no + i;
const page_id_t cur_page_id(space->id, offset);
if (ibuf_bitmap_page_get_bits(
- bitmap_page, cur_page_id, zip_size,
+ bitmap_page->frame, cur_page_id, zip_size,
IBUF_BITMAP_IBUF, &mtr)) {
mutex_exit(&ibuf_mutex);
@@ -4914,7 +4802,7 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
}
if (ibuf_bitmap_page_get_bits(
- bitmap_page, cur_page_id, zip_size,
+ bitmap_page->frame, cur_page_id, zip_size,
IBUF_BITMAP_BUFFERED, &mtr)) {
ib_errf(trx->mysql_thd,
@@ -4928,10 +4816,9 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
/* Tolerate this error, so that
slightly corrupted tables can be
imported and dumped. Clear the bit. */
- ibuf_bitmap_page_set_bits(
+ ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>(
bitmap_page, cur_page_id,
- physical_size,
- IBUF_BITMAP_BUFFERED, FALSE, &mtr);
+ physical_size, false, &mtr);
}
}
@@ -4951,7 +4838,6 @@ ibuf_set_bitmap_for_bulk_load(
buf_block_t* block,
bool reset)
{
- page_t* bitmap_page;
mtr_t mtr;
ulint free_val;
@@ -4959,20 +4845,22 @@ ibuf_set_bitmap_for_bulk_load(
free_val = ibuf_index_page_calc_free(block);
- mtr_start(&mtr);
+ mtr.start();
fil_space_t* space = mtr.set_named_space_id(block->page.id.space());
- bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
- space->zip_size(), &mtr);
+ buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
+ space->zip_size(),
+ &mtr);
free_val = reset ? 0 : ibuf_index_page_calc_free(block);
- ibuf_bitmap_page_set_bits(
+ /* FIXME: update the bitmap byte only once! */
+ ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(
bitmap_page, block->page.id, block->physical_size(),
- IBUF_BITMAP_FREE, free_val, &mtr);
+ free_val, &mtr);
- ibuf_bitmap_page_set_bits(
+ ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>(
bitmap_page, block->page.id, block->physical_size(),
- IBUF_BITMAP_BUFFERED, FALSE, &mtr);
+ false, &mtr);
- mtr_commit(&mtr);
+ mtr.commit();
}
diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h
index e1b3158f997..c2d7c786bc5 100644
--- a/storage/innobase/include/btr0btr.h
+++ b/storage/innobase/include/btr0btr.h
@@ -541,7 +541,7 @@ inline void btr_set_min_rec_mark(rec_t *rec, const buf_block_t &block,
page. We are not modifying the compressed page frame at all. */
*rec|= REC_INFO_MIN_REC_FLAG;
else
- mlog_write_ulint(rec, *rec | REC_INFO_MIN_REC_FLAG, MLOG_1BYTE, mtr);
+ mtr->write<1>(block, rec, *rec | REC_INFO_MIN_REC_FLAG);
}
/** Seek to the parent page of a B-tree page.
diff --git a/storage/innobase/include/btr0btr.ic b/storage/innobase/include/btr0btr.ic
index e06127efaa3..427bd130b79 100644
--- a/storage/innobase/include/btr0btr.ic
+++ b/storage/innobase/include/btr0btr.ic
@@ -30,28 +30,6 @@ Created 6/2/1994 Heikki Tuuri
#include "page0zip.h"
/**************************************************************//**
-Sets the index id field of a page. */
-UNIV_INLINE
-void
-btr_page_set_index_id(
-/*==================*/
- page_t* page, /*!< in: page to be created */
- page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
- part will be updated, or NULL */
- index_id_t id, /*!< in: index id */
- mtr_t* mtr) /*!< in: mtr */
-{
- if (page_zip) {
- mach_write_to_8(page + (PAGE_HEADER + PAGE_INDEX_ID), id);
- page_zip_write_header(page_zip,
- page + (PAGE_HEADER + PAGE_INDEX_ID),
- 8, mtr);
- } else {
- mlog_write_ull(page + (PAGE_HEADER + PAGE_INDEX_ID), id, mtr);
- }
-}
-
-/**************************************************************//**
Gets the index id field of a page.
@return index id */
UNIV_INLINE
@@ -63,77 +41,56 @@ btr_page_get_index_id(
return(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID));
}
-/********************************************************//**
-Sets the node level field in an index page. */
-UNIV_INLINE
-void
-btr_page_set_level(
-/*===============*/
- page_t* page, /*!< in: index page */
- page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
- part will be updated, or NULL */
- ulint level, /*!< in: level, leaf level == 0 */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+/** Set PAGE_LEVEL.
+@param[in,out] block buffer block
+@param[in] level page level
+@param[in,out] mtr mini-transaction */
+inline
+void btr_page_set_level(buf_block_t *block, ulint level, mtr_t *mtr)
{
- ut_ad(page != NULL);
- ut_ad(mtr != NULL);
- ut_ad(level <= BTR_MAX_NODE_LEVEL);
-
- if (page_zip) {
- mach_write_to_2(page + (PAGE_HEADER + PAGE_LEVEL), level);
- page_zip_write_header(page_zip,
- page + (PAGE_HEADER + PAGE_LEVEL),
- 2, mtr);
- } else {
- mlog_write_ulint(page + (PAGE_HEADER + PAGE_LEVEL), level,
- MLOG_2BYTES, mtr);
- }
+ ut_ad(level <= BTR_MAX_NODE_LEVEL);
+
+ byte *page_level= PAGE_HEADER + PAGE_LEVEL + block->frame;
+
+ if (UNIV_LIKELY_NULL(block->page.zip.data))
+ {
+ mach_write_to_2(page_level, level);
+ page_zip_write_header(&block->page.zip, page_level, 2, mtr);
+ }
+ else
+ mtr->write<2,mtr_t::OPT>(*block, page_level, level);
}
-/********************************************************//**
-Sets the next index page field. */
-UNIV_INLINE
-void
-btr_page_set_next(
-/*==============*/
- page_t* page, /*!< in: index page */
- page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
- part will be updated, or NULL */
- ulint next, /*!< in: next page number */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+/** Set FIL_PAGE_NEXT.
+@param[in,out] block buffer block
+@param[in] next number of successor page
+@param[in,out] mtr mini-transaction */
+inline void btr_page_set_next(buf_block_t *block, ulint next, mtr_t *mtr)
{
- ut_ad(page != NULL);
- ut_ad(mtr != NULL);
-
- if (page_zip) {
- mach_write_to_4(page + FIL_PAGE_NEXT, next);
- page_zip_write_header(page_zip, page + FIL_PAGE_NEXT, 4, mtr);
- } else {
- mlog_write_ulint(page + FIL_PAGE_NEXT, next, MLOG_4BYTES, mtr);
- }
+ byte *fil_page_next= block->frame + FIL_PAGE_NEXT;
+ if (UNIV_LIKELY_NULL(block->page.zip.data))
+ {
+ mach_write_to_4(fil_page_next, next);
+ page_zip_write_header(&block->page.zip, fil_page_next, 4, mtr);
+ }
+ else
+ mtr->write<4>(*block, fil_page_next, next);
}
-/********************************************************//**
-Sets the previous index page field. */
-UNIV_INLINE
-void
-btr_page_set_prev(
-/*==============*/
- page_t* page, /*!< in: index page */
- page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed
- part will be updated, or NULL */
- ulint prev, /*!< in: previous page number */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+/** Set FIL_PAGE_PREV.
+@param[in,out] block buffer block
+@param[in] prev number of predecessor page
+@param[in,out] mtr mini-transaction */
+inline void btr_page_set_prev(buf_block_t *block, ulint prev, mtr_t *mtr)
{
- ut_ad(page != NULL);
- ut_ad(mtr != NULL);
-
- if (page_zip) {
- mach_write_to_4(page + FIL_PAGE_PREV, prev);
- page_zip_write_header(page_zip, page + FIL_PAGE_PREV, 4, mtr);
- } else {
- mlog_write_ulint(page + FIL_PAGE_PREV, prev, MLOG_4BYTES, mtr);
- }
+ byte *fil_page_prev= block->frame + FIL_PAGE_PREV;
+ if (UNIV_LIKELY_NULL(block->page.zip.data))
+ {
+ mach_write_to_4(fil_page_prev, prev);
+ page_zip_write_header(&block->page.zip, fil_page_prev, 4, mtr);
+ }
+ else
+ mtr->write<4>(*block, fil_page_prev, prev);
}
/**************************************************************//**
diff --git a/storage/innobase/include/btr0bulk.h b/storage/innobase/include/btr0bulk.h
index d1a8af22d68..6bed9e47b17 100644
--- a/storage/innobase/include/btr0bulk.h
+++ b/storage/innobase/include/btr0bulk.h
@@ -101,11 +101,25 @@ public:
/** Insert a record in the page.
@param[in] rec record
@param[in] offsets record offsets */
- void insert(const rec_t* rec, ulint* offsets);
+ inline void insert(const rec_t* rec, ulint* offsets);
+private:
+ /** Page format */
+ enum format { REDUNDANT, DYNAMIC, COMPRESSED };
+ /** Mark end of insertion to the page. Scan all records to set page
+ dirs, and set page header members.
+ @tparam format the page format */
+ template<format> inline void finishPage();
+ /** Insert a record in the page.
+ @tparam format the page format
+ @param[in] rec record
+ @param[in] offsets record offsets */
+ template<format> inline void insertPage(const rec_t* rec,
+ ulint* offsets);
+public:
/** Mark end of insertion to the page. Scan all records to set page
dirs, and set page header members. */
- void finish();
+ inline void finish();
/** Commit mtr for a page
@param[in] success Flag whether all inserts succeed. */
@@ -199,6 +213,8 @@ public:
return(m_err);
}
+ void set_modified() { m_mtr.set_modified(); }
+
/* Memory heap for internal allocation */
mem_heap_t* m_heap;
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index bcdbb08e81f..659232bc7fc 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -646,8 +646,7 @@ to free the field. */
void
btr_cur_disown_inherited_fields(
/*============================*/
- page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
- part will be updated, or NULL */
+ buf_block_t* block, /*!< in/out: index page */
rec_t* rec, /*!< in/out: record in a clustered index */
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
@@ -722,12 +721,12 @@ btr_free_externally_stored_field(
page_zip_write_blob_ptr(), or NULL */
const ulint* offsets, /*!< in: rec_get_offsets(rec, index),
or NULL */
- page_zip_des_t* page_zip, /*!< in: compressed page corresponding
- to rec, or NULL if rec == NULL */
+ buf_block_t* block, /*!< in/out: page of field_ref */
ulint i, /*!< in: field number of field_ref;
ignored if rec == NULL */
bool rollback, /*!< in: performing rollback? */
- mtr_t* local_mtr); /*!< in: mtr containing the latch */
+ mtr_t* local_mtr) /*!< in: mtr containing the latch */
+ MY_ATTRIBUTE((nonnull(1,2,5,8)));
/** Copies the prefix of an externally stored field of a record.
The clustered index record must be protected by a lock or a page latch.
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index a8a37dfd0a2..3253b864786 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -717,16 +717,6 @@ inline void aligned_free(void *ptr)
}
/**********************************************************************//**
-Gets the space id, page offset, and byte offset within page of a
-pointer pointing to a buffer frame containing a file page. */
-UNIV_INLINE
-void
-buf_ptr_get_fsp_addr(
-/*=================*/
- const void* ptr, /*!< in: pointer to a buffer frame */
- ulint* space, /*!< out: space id */
- fil_addr_t* addr); /*!< out: page offset and byte offset */
-/**********************************************************************//**
Gets the hash value of a block. This can be used in searches in the
lock hash table.
@return lock hash value */
@@ -1094,9 +1084,9 @@ buf_block_get_frame(
Gets the compressed page descriptor corresponding to an uncompressed page
if applicable. */
#define buf_block_get_page_zip(block) \
- ((block)->page.zip.data ? &(block)->page.zip : NULL)
+ (UNIV_LIKELY_NULL((block)->page.zip.data) ? &(block)->page.zip : NULL)
#define is_buf_block_get_page_zip(block) \
- ((block)->page.zip.data != 0)
+ UNIV_LIKELY_NULL((block)->page.zip.data)
#ifdef BTR_CUR_HASH_ADAPT
/** Get a buffer block from an adaptive hash index pointer.
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index 59672dc3295..45cafcdf3fe 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -759,25 +759,6 @@ buf_frame_align(
}
/**********************************************************************//**
-Gets the space id, page offset, and byte offset within page of a
-pointer pointing to a buffer frame containing a file page. */
-UNIV_INLINE
-void
-buf_ptr_get_fsp_addr(
-/*=================*/
- const void* ptr, /*!< in: pointer to a buffer frame */
- ulint* space, /*!< out: space id */
- fil_addr_t* addr) /*!< out: page offset and byte offset */
-{
- const page_t* page = (const page_t*) ut_align_down(ptr,
- srv_page_size);
-
- *space = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
- addr->page = mach_read_from_4(page + FIL_PAGE_OFFSET);
- addr->boffset = static_cast<uint16_t>(ut_align_offset(ptr, srv_page_size));
-}
-
-/**********************************************************************//**
Gets the hash value of the page the pointer is pointing to. This can be used
in searches in the lock hash table.
@return lock hash value */
diff --git a/storage/innobase/include/dict0boot.h b/storage/innobase/include/dict0boot.h
index 778471b77ae..3144303c1a1 100644
--- a/storage/innobase/include/dict0boot.h
+++ b/storage/innobase/include/dict0boot.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2018, MariaDB Corporation.
+Copyright (c) 2018, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -33,15 +33,8 @@ Created 4/18/1996 Heikki Tuuri
#include "buf0buf.h"
#include "dict0dict.h"
-typedef byte dict_hdr_t;
-
-/**********************************************************************//**
-Gets a pointer to the dictionary header and x-latches its page.
-@return pointer to the dictionary header, page x-latched */
-dict_hdr_t*
-dict_hdr_get(
-/*=========*/
- mtr_t* mtr); /*!< in: mtr */
+/** @return the DICT_HDR block, x-latched */
+buf_block_t *dict_hdr_get(mtr_t* mtr);
/**********************************************************************//**
Returns a new table, index, or space id. */
void
diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h
index a87f1fb2fe6..71b1ed6a3f1 100644
--- a/storage/innobase/include/fsp0fsp.h
+++ b/storage/innobase/include/fsp0fsp.h
@@ -101,7 +101,6 @@ see the table in fsp0types.h @{ */
#define FSP_HEADER_OFFSET FIL_PAGE_DATA
/* The data structures in files are defined just as byte strings in C */
-typedef byte fsp_header_t;
typedef byte xdes_t;
/* SPACE HEADER
@@ -207,7 +206,7 @@ typedef byte fseg_inode_t;
(16 + 3 * FLST_BASE_NODE_SIZE \
+ FSEG_FRAG_ARR_N_SLOTS * FSEG_FRAG_SLOT_SIZE)
-#define FSEG_MAGIC_N_VALUE 97937874
+static constexpr uint32_t FSEG_MAGIC_N_VALUE= 97937874;
#define FSEG_FILLFACTOR 8 /* If this value is x, then if
the number of unused but reserved
@@ -534,7 +533,7 @@ by repeatedly calling this function in different mini-transactions.
Doing the freeing in a single mini-transaction might result in
too big a mini-transaction.
@return TRUE if freeing completed */
-ibool
+bool
fseg_free_step_func(
fseg_header_t* header, /*!< in, own: segment header; NOTE: if the header
resides on the first page of the frag list
@@ -554,8 +553,8 @@ fseg_free_step_func(
/**********************************************************************//**
Frees part of a segment. Differs from fseg_free_step because this function
leaves the header page unfreed.
-@return TRUE if freeing completed, except the header page */
-ibool
+@return true if freeing completed, except the header page */
+bool
fseg_free_step_not_header_func(
fseg_header_t* header, /*!< in: segment header which must reside on
the first fragment page of the segment */
diff --git a/storage/innobase/include/fut0lst.h b/storage/innobase/include/fut0lst.h
index d009e5b050a..d1205a41251 100644
--- a/storage/innobase/include/fut0lst.h
+++ b/storage/innobase/include/fut0lst.h
@@ -68,79 +68,91 @@ typedef byte flst_node_t;
@param[in,out] block file page
@param[in] ofs byte offset of the list base node
@param[in,out] mtr mini-transaction */
-inline void flst_init(buf_block_t* block, uint16_t ofs, mtr_t* mtr)
+inline void flst_init(const buf_block_t* block, uint16_t ofs, mtr_t* mtr)
{
- ut_ad(0 == mach_read_from_2(FLST_LEN + ofs + block->frame));
- ut_ad(0 == mach_read_from_2(FLST_FIRST + FIL_ADDR_BYTE + ofs
- + block->frame));
- ut_ad(0 == mach_read_from_2(FLST_LAST + FIL_ADDR_BYTE + ofs
- + block->frame));
- compile_time_assert(FIL_NULL == 0xffU * 0x1010101U);
- mlog_memset(block, FLST_FIRST + FIL_ADDR_PAGE + ofs, 4, 0xff, mtr);
- mlog_memset(block, FLST_LAST + FIL_ADDR_PAGE + ofs, 4, 0xff, mtr);
+ ut_ad(!mach_read_from_2(FLST_LEN + ofs + block->frame));
+ ut_ad(!mach_read_from_2(FLST_FIRST + FIL_ADDR_BYTE + ofs + block->frame));
+ ut_ad(!mach_read_from_2(FLST_LAST + FIL_ADDR_BYTE + ofs + block->frame));
+ compile_time_assert(FIL_NULL == 0xffU * 0x1010101U);
+ mlog_memset(block, FLST_FIRST + FIL_ADDR_PAGE + ofs, 4, 0xff, mtr);
+ mlog_memset(block, FLST_LAST + FIL_ADDR_PAGE + ofs, 4, 0xff, mtr);
}
/** Write a null file address.
-@param[in,out] faddr file address to be zeroed otu
-@param[in,out] mtr mini-transaction */
-inline void flst_zero_addr(fil_faddr_t* faddr, mtr_t* mtr)
+@param[in] b file page
+@param[in,out] addr file address to be zeroed out
+@param[in,out] mtr mini-transaction */
+inline void flst_zero_addr(const buf_block_t& b, fil_faddr_t *addr, mtr_t *mtr)
+{
+ if (mach_read_from_4(addr + FIL_ADDR_PAGE) != FIL_NULL)
+ mlog_memset(&b, ulint(addr - b.frame) + FIL_ADDR_PAGE, 4, 0xff, mtr);
+ mtr->write<2,mtr_t::OPT>(b, addr + FIL_ADDR_BYTE, 0U);
+}
+
+/** Write a file address.
+@param[in] block file page
+@param[in,out] faddr file address location
+@param[in] addr file address to be written out
+@param[in,out] mtr mini-transaction */
+inline void flst_write_addr(const buf_block_t& block, fil_faddr_t *faddr,
+ fil_addr_t addr, mtr_t* mtr)
+{
+ ut_ad(mtr->memo_contains_page_flagged(faddr,
+ MTR_MEMO_PAGE_X_FIX
+ | MTR_MEMO_PAGE_SX_FIX));
+ ut_a(addr.page == FIL_NULL || addr.boffset >= FIL_PAGE_DATA);
+ ut_a(ut_align_offset(faddr, srv_page_size) >= FIL_PAGE_DATA);
+
+ mtr->write<4,mtr_t::OPT>(block, faddr + FIL_ADDR_PAGE, addr.page);
+ mtr->write<2,mtr_t::OPT>(block, faddr + FIL_ADDR_BYTE, addr.boffset);
+}
+
+/** Initialize a list base node.
+@param[in] block file page
+@param[in,out] base base node
+@param[in,out] mtr mini-transaction */
+inline void flst_init(const buf_block_t& block, byte *base, mtr_t *mtr)
{
- if (mach_read_from_4(faddr + FIL_ADDR_PAGE) != FIL_NULL) {
- mlog_memset(faddr + FIL_ADDR_PAGE, 4, 0xff, mtr);
- }
- if (mach_read_from_2(faddr + FIL_ADDR_BYTE)) {
- mlog_write_ulint(faddr + FIL_ADDR_BYTE, 0, MLOG_2BYTES, mtr);
- }
+ ut_ad(mtr->memo_contains_page_flagged(base, MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+ mtr->write<4,mtr_t::OPT>(block, base + FLST_LEN, 0U);
+ flst_zero_addr(block, base + FLST_FIRST, mtr);
+ flst_zero_addr(block, base + FLST_LAST, mtr);
}
-/********************************************************************//**
-Initializes a list base node. */
-UNIV_INLINE
-void
-flst_init(
-/*======*/
- flst_base_node_t* base, /*!< in: pointer to base node */
- mtr_t* mtr); /*!< in: mini-transaction handle */
-/********************************************************************//**
-Adds a node as the last node in a list. */
-void
-flst_add_last(
-/*==========*/
- flst_base_node_t* base, /*!< in: pointer to base node of list */
- flst_node_t* node, /*!< in: node to add */
- mtr_t* mtr); /*!< in: mini-transaction handle */
-/********************************************************************//**
-Adds a node as the first node in a list. */
-void
-flst_add_first(
-/*===========*/
- flst_base_node_t* base, /*!< in: pointer to base node of list */
- flst_node_t* node, /*!< in: node to add */
- mtr_t* mtr); /*!< in: mini-transaction handle */
-/********************************************************************//**
-Removes a node. */
-void
-flst_remove(
-/*========*/
- flst_base_node_t* base, /*!< in: pointer to base node of list */
- flst_node_t* node2, /*!< in: node to remove */
- mtr_t* mtr); /*!< in: mini-transaction handle */
-/** Get the length of a list.
-@param[in] base base node
-@return length */
-UNIV_INLINE
-uint32_t
-flst_get_len(
- const flst_base_node_t* base);
-/********************************************************************//**
-Writes a file address. */
-UNIV_INLINE
-void
-flst_write_addr(
-/*============*/
- fil_faddr_t* faddr, /*!< in: pointer to file faddress */
- fil_addr_t addr, /*!< in: file address */
- mtr_t* mtr); /*!< in: mini-transaction handle */
+/** Append a file list node to a list.
+@param[in,out] base base node block
+@param[in] boffset byte offset of the base node
+@param[in,out] add block to be added
+@param[in] aoffset byte offset of the node to be added
+@param[in,out] mtr mini-transaction */
+void flst_add_last(buf_block_t *base, uint16_t boffset,
+ buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
+ MY_ATTRIBUTE((nonnull));
+/** Prepend a file list node to a list.
+@param[in,out] base base node block
+@param[in] boffset byte offset of the base node
+@param[in,out] add block to be added
+@param[in] aoffset byte offset of the node to be added
+@param[in,out] mtr mini-transaction */
+void flst_add_first(buf_block_t *base, uint16_t boffset,
+ buf_block_t *add, uint16_t aoffset, mtr_t *mtr)
+ MY_ATTRIBUTE((nonnull));
+/** Remove a file list node.
+@param[in,out] base base node block
+@param[in] boffset byte offset of the base node
+@param[in,out] cur block to be removed
+@param[in] coffset byte offset of the current record to be removed
+@param[in,out] mtr mini-transaction */
+void flst_remove(buf_block_t *base, uint16_t boffset,
+ buf_block_t *cur, uint16_t coffset, mtr_t *mtr)
+ MY_ATTRIBUTE((nonnull));
+
+/** @return the length of a list */
+inline uint32_t flst_get_len(const flst_base_node_t *base)
+{
+ return mach_read_from_4(base + FLST_LEN);
+}
/** @return a file address */
inline fil_addr_t flst_read_addr(const fil_faddr_t *faddr)
@@ -176,16 +188,10 @@ inline fil_addr_t flst_get_prev_addr(const flst_node_t *node)
return flst_read_addr(node + FLST_PREV);
}
-/********************************************************************//**
-Validates a file-based list.
-@return TRUE if ok */
-ibool
-flst_validate(
-/*==========*/
- const flst_base_node_t* base, /*!< in: pointer to base node of list */
- mtr_t* mtr1); /*!< in: mtr */
-
-#include "fut0lst.ic"
+#ifdef UNIV_DEBUG
+/** Validate a file-based list. */
+void flst_validate(const buf_block_t *base, uint16_t boffset, mtr_t *mtr);
+#endif
#endif /* !UNIV_INNOCHECKSUM */
diff --git a/storage/innobase/include/fut0lst.ic b/storage/innobase/include/fut0lst.ic
deleted file mode 100644
index b672001f660..00000000000
--- a/storage/innobase/include/fut0lst.ic
+++ /dev/null
@@ -1,80 +0,0 @@
-/*****************************************************************************
-
-Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2019, MariaDB Corporation.
-
-This program is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
-
-*****************************************************************************/
-
-/******************************************************************//**
-@file include/fut0lst.ic
-File-based list utilities
-
-Created 11/28/1995 Heikki Tuuri
-***********************************************************************/
-
-#include "buf0buf.h"
-
-/********************************************************************//**
-Writes a file address. */
-UNIV_INLINE
-void
-flst_write_addr(
-/*============*/
- fil_faddr_t* faddr, /*!< in: pointer to file faddress */
- fil_addr_t addr, /*!< in: file address */
- mtr_t* mtr) /*!< in: mini-transaction handle */
-{
- ut_ad(faddr && mtr);
- ut_ad(mtr_memo_contains_page_flagged(mtr, faddr,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
- ut_a(addr.page == FIL_NULL || addr.boffset >= FIL_PAGE_DATA);
- ut_a(ut_align_offset(faddr, srv_page_size) >= FIL_PAGE_DATA);
-
- mlog_write_ulint(faddr + FIL_ADDR_PAGE, addr.page, MLOG_4BYTES, mtr);
- mlog_write_ulint(faddr + FIL_ADDR_BYTE, addr.boffset,
- MLOG_2BYTES, mtr);
-}
-
-/********************************************************************//**
-Initializes a list base node. */
-UNIV_INLINE
-void
-flst_init(
-/*======*/
- flst_base_node_t* base, /*!< in: pointer to base node */
- mtr_t* mtr) /*!< in: mini-transaction handle */
-{
- ut_ad(mtr_memo_contains_page_flagged(mtr, base,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
-
- if (mach_read_from_4(base + FLST_LEN)) {
- mlog_write_ulint(base + FLST_LEN, 0, MLOG_4BYTES, mtr);
- }
- flst_zero_addr(base + FLST_FIRST, mtr);
- flst_zero_addr(base + FLST_LAST, mtr);
-}
-
-/** Get the length of a list.
-@param[in] base base node
-@return length */
-UNIV_INLINE
-uint32_t
-flst_get_len(
- const flst_base_node_t* base)
-{
- return(mach_read_from_4(base + FLST_LEN));
-}
diff --git a/storage/innobase/include/mtr0log.h b/storage/innobase/include/mtr0log.h
index 6ebf834456c..8742e55b51a 100644
--- a/storage/innobase/include/mtr0log.h
+++ b/storage/innobase/include/mtr0log.h
@@ -34,26 +34,6 @@ Created 12/7/1995 Heikki Tuuri
struct dict_index_t;
/********************************************************//**
-Writes 1, 2 or 4 bytes to a file page. Writes the corresponding log
-record to the mini-transaction log if mtr is not NULL. */
-void
-mlog_write_ulint(
-/*=============*/
- byte* ptr, /*!< in: pointer where to write */
- ulint val, /*!< in: value to write */
- mlog_id_t type, /*!< in: MLOG_1BYTE, MLOG_2BYTES, MLOG_4BYTES */
- mtr_t* mtr); /*!< in: mini-transaction handle */
-
-/********************************************************//**
-Writes 8 bytes to a file page. Writes the corresponding log
-record to the mini-transaction log, only if mtr is not NULL */
-void
-mlog_write_ull(
-/*===========*/
- byte* ptr, /*!< in: pointer where to write */
- ib_uint64_t val, /*!< in: value to write */
- mtr_t* mtr); /*!< in: mini-transaction handle */
-/********************************************************//**
Writes a string to a file page buffered in the buffer pool. Writes the
corresponding log record to the mini-transaction log. */
void
@@ -80,7 +60,7 @@ mlog_log_string(
@param[in] val the data byte to write
@param[in,out] mtr mini-transaction */
void
-mlog_memset(buf_block_t* b, ulint ofs, ulint len, byte val, mtr_t* mtr);
+mlog_memset(const buf_block_t* b, ulint ofs, ulint len, byte val, mtr_t* mtr);
/** Initialize a string of bytes.
@param[in,out] byte byte address
@@ -124,14 +104,6 @@ mlog_catenate_ulint_compressed(
mtr_t* mtr, /*!< in: mtr */
ulint val); /*!< in: value to write */
/********************************************************//**
-Catenates a compressed 64-bit integer to mlog. */
-UNIV_INLINE
-void
-mlog_catenate_ull_compressed(
-/*=========================*/
- mtr_t* mtr, /*!< in: mtr */
- ib_uint64_t val); /*!< in: value to write */
-/********************************************************//**
Opens a buffer to mlog. It must be closed with mlog_close.
@return buffer, NULL if log mode MTR_LOG_NONE */
UNIV_INLINE
@@ -151,6 +123,56 @@ mlog_close(
byte* ptr); /*!< in: buffer space from ptr up was
not used */
+/** Write 1, 2, 4, or 8 bytes to a file page.
+@param[in] block file page
+@param[in,out] ptr pointer in file page
+@param[in] val value to write
+@tparam l number of bytes to write
+@tparam w write request type
+@tparam V type of val */
+template<unsigned l,mtr_t::write_type w,typename V>
+inline void mtr_t::write(const buf_block_t &block, byte *ptr, V val)
+{
+ ut_ad(ut_align_down(ptr, srv_page_size) == block.frame);
+ ut_ad(m_log_mode == MTR_LOG_NONE || m_log_mode == MTR_LOG_NO_REDO ||
+ !block.page.zip.data ||
+ /* written by fil_crypt_rotate_page() or innodb_make_page_dirty()? */
+ (w == FORCED && l == 1 && ptr == &block.frame[FIL_PAGE_SPACE_ID]) ||
+ mach_read_from_2(block.frame + FIL_PAGE_TYPE) <= FIL_PAGE_TYPE_ZBLOB2);
+ static_assert(l == 1 || l == 2 || l == 4 || l == 8, "wrong length");
+
+ switch (l) {
+ case 1:
+ if (w == OPT && mach_read_from_1(ptr) == val) return;
+ ut_ad(w != NORMAL || mach_read_from_1(ptr) != val);
+ mach_write_to_1(ptr, val);
+ break;
+ case 2:
+ if (w == OPT && mach_read_from_2(ptr) == val) return;
+ ut_ad(w != NORMAL || mach_read_from_2(ptr) != val);
+ mach_write_to_2(ptr, val);
+ break;
+ case 4:
+ if (w == OPT && mach_read_from_4(ptr) == val) return;
+ ut_ad(w != NORMAL || mach_read_from_4(ptr) != val);
+ mach_write_to_4(ptr, val);
+ break;
+ case 8:
+ if (w == OPT && mach_read_from_8(ptr) == val) return;
+ ut_ad(w != NORMAL || mach_read_from_8(ptr) != val);
+ mach_write_to_8(ptr, val);
+ break;
+ }
+ byte *log_ptr= mlog_open(this, 11 + 2 + (l == 8 ? 9 : 5));
+ if (!log_ptr)
+ return;
+ if (l == 8)
+ log_write(block, ptr, static_cast<mlog_id_t>(l), log_ptr, uint64_t{val});
+ else
+ log_write(block, ptr, static_cast<mlog_id_t>(l), log_ptr,
+ static_cast<uint32_t>(val));
+}
+
/** Writes a log record about an operation.
@param[in] type redo log record type
@param[in] space_id tablespace identifier
@@ -195,7 +217,7 @@ mlog_parse_initial_log_record(
ulint* space, /*!< out: space id */
ulint* page_no);/*!< out: page number */
/********************************************************//**
-Parses a log record written by mlog_write_ulint, mlog_write_ull, mlog_memset.
+Parses a log record written by mtr_t::write(), mlog_memset().
@return parsed record end, NULL if not a complete record */
const byte*
mlog_parse_nbytes(
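Illustrative sketch (not part of this diff): a typical call site migrates from the run-time MLOG_nBYTES length code to a compile-time template argument, as in the srv0start.cc hunk further down; the FSP_SIZE field is only an example.

    /* before: length passed as an mlog_id_t at run time */
    mlog_write_ulint(FSP_HEADER_OFFSET + FSP_SIZE + block->frame,
                     size, MLOG_4BYTES, &mtr);

    /* after: the length is a template argument, and passing the block
    descriptor enables the extra consistency checks in mtr_t::write() */
    mtr.write<4>(*block, FSP_HEADER_OFFSET + FSP_SIZE + block->frame, size);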
diff --git a/storage/innobase/include/mtr0log.ic b/storage/innobase/include/mtr0log.ic
index 371d9ec905d..d493c0959a9 100644
--- a/storage/innobase/include/mtr0log.ic
+++ b/storage/innobase/include/mtr0log.ic
@@ -141,30 +141,6 @@ mlog_catenate_ulint_compressed(
mlog_close(mtr, log_ptr);
}
-/********************************************************//**
-Catenates a compressed 64-bit integer to mlog. */
-UNIV_INLINE
-void
-mlog_catenate_ull_compressed(
-/*=========================*/
- mtr_t* mtr, /*!< in: mtr */
- ib_uint64_t val) /*!< in: value to write */
-{
- byte* log_ptr;
-
- log_ptr = mlog_open(mtr, 15);
-
- /* If no logging is requested, we may return now */
- if (log_ptr == NULL) {
-
- return;
- }
-
- log_ptr += mach_u64_write_compressed(log_ptr, val);
-
- mlog_close(mtr, log_ptr);
-}
-
/** Writes a log record about an operation.
@param[in] type redo log record type
@param[in] space_id tablespace identifier
diff --git a/storage/innobase/include/mtr0mtr.h b/storage/innobase/include/mtr0mtr.h
index ee1e3eadd71..f7df66f0ea4 100644
--- a/storage/innobase/include/mtr0mtr.h
+++ b/storage/innobase/include/mtr0mtr.h
@@ -425,7 +425,50 @@ struct mtr_t {
static inline bool is_block_dirtied(const buf_block_t* block)
MY_ATTRIBUTE((warn_unused_result));
+ /** Write request types */
+ enum write_type
+ {
+ /** the page is guaranteed to always change */
+ NORMAL= 0,
+ /** optional: the page contents might not change */
+ OPT,
+ /** force a write, even if the page contents is not changing */
+ FORCED
+ };
+
+ /** Write 1, 2, 4, or 8 bytes to a file page.
+ @param[in] block file page
+ @param[in,out] ptr pointer in file page
+ @param[in] val value to write
+ @tparam l number of bytes to write
+ @tparam w write request type
+ @tparam V type of val */
+ template<unsigned l,write_type w= NORMAL,typename V>
+ inline void write(const buf_block_t &block, byte *ptr, V val)
+ MY_ATTRIBUTE((nonnull));
+
private:
+ /**
+ Write a log record for writing 1, 2, or 4 bytes.
+ @param[in] block file page
+ @param[in,out] ptr pointer in file page
+ @param[in] l number of bytes to write
+ @param[in,out] log_ptr log record buffer
+ @param[in] val value to write */
+ void log_write(const buf_block_t &block, byte *ptr, mlog_id_t l,
+ byte *log_ptr, uint32_t val)
+ MY_ATTRIBUTE((nonnull));
+ /**
+ Write a log record for writing 8 bytes.
+ @param[in] block file page
+ @param[in,out] ptr pointer in file page
+ @param[in] l number of bytes to write (8)
+ @param[in,out] log_ptr log record buffer
+ @param[in] val value to write */
+ void log_write(const buf_block_t &block, byte *ptr, mlog_id_t l,
+ byte *log_ptr, uint64_t val)
+ MY_ATTRIBUTE((nonnull));
+
/** Prepare to write the mini-transaction log to the redo log buffer.
@return number of bytes to write in finish_write() */
inline ulint prepare_write();
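Illustrative sketch (not part of this diff) of the three write_type values: the NORMAL and OPT calls mirror call sites elsewhere in this patch, while the FORCED call is a hypothetical dummy write of the kind mtr_t::write() expects from fil_crypt_rotate_page() and innodb_make_page_dirty().

    /* NORMAL (default): the caller knows the value changes;
    debug builds assert that it really does */
    mtr->write<8>(*block, PAGE_HEADER + PAGE_MAX_TRX_ID + block->frame,
                  trx_id);

    /* OPT: skip both the page write and the redo record
    if the stored value already equals the new one */
    mtr->write<2,mtr_t::OPT>(*block,
                             PAGE_HEADER + PAGE_LAST_INSERT + block->frame,
                             0U);

    /* FORCED: write (and log) even if nothing changes, e.g. a dummy
    rewrite of one byte of FIL_PAGE_SPACE_ID to mark the page dirty */
    byte* b = &block->frame[FIL_PAGE_SPACE_ID];
    mtr->write<1,mtr_t::FORCED>(*block, b, *b);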
diff --git a/storage/innobase/include/mtr0types.h b/storage/innobase/include/mtr0types.h
index 7b5fd457d9f..06ac4a62e78 100644
--- a/storage/innobase/include/mtr0types.h
+++ b/storage/innobase/include/mtr0types.h
@@ -52,7 +52,7 @@ enum mtr_log_t {
/** @name Log item types
The log items are declared 'byte' so that the compiler can warn if val
-and type parameters are switched in a call to mlog_write_ulint. NOTE!
+and type parameters are switched in a call to mlog_write. NOTE!
For 1 - 8 bytes, the flag value must give the length also! @{ */
enum mlog_id_t {
/** if the mtr contains only one log record for one page,
diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h
index 7c9f85c47c7..a438d111086 100644
--- a/storage/innobase/include/page0page.h
+++ b/storage/innobase/include/page0page.h
@@ -31,6 +31,7 @@ Created 2/2/1994 Heikki Tuuri
#include "fil0fil.h"
#include "buf0buf.h"
#include "rem0rec.h"
+#include "mach0data.h"
#ifndef UNIV_INNOCHECKSUM
#include "dict0dict.h"
#include "data0data.h"
@@ -42,8 +43,6 @@ Created 2/2/1994 Heikki Tuuri
Index page header starts at the first offset left free by the FIL-module */
typedef byte page_header_t;
-#else
-# include "mach0data.h"
#endif /* !UNIV_INNOCHECKSUM */
#define PAGE_HEADER FSEG_PAGE_DATA /* index page header starts at this
@@ -393,13 +392,17 @@ inline
bool
page_rec_is_infimum(const rec_t* rec);
-/*************************************************************//**
-Returns the max trx id field value. */
-UNIV_INLINE
-trx_id_t
-page_get_max_trx_id(
-/*================*/
- const page_t* page); /*!< in: page */
+/** Read PAGE_MAX_TRX_ID.
+@param[in] page index page
+@return the value of PAGE_MAX_TRX_ID or PAGE_ROOT_AUTO_INC */
+inline trx_id_t page_get_max_trx_id(const page_t *page)
+{
+ static_assert((PAGE_HEADER + PAGE_MAX_TRX_ID) % 8 == 0, "alignment");
+ const byte *p= static_cast<const byte*>
+ (MY_ASSUME_ALIGNED(page + PAGE_HEADER + PAGE_MAX_TRX_ID, 8));
+ return mach_read_from_8(p);
+}
+
/*************************************************************//**
Sets the max trx id field value. */
void
@@ -424,7 +427,6 @@ page_update_max_trx_id(
/** Persist the AUTO_INCREMENT value on a clustered index root page.
@param[in,out] block clustered index root page
-@param[in] index clustered index
@param[in] autoinc next available AUTO_INCREMENT value
@param[in,out] mtr mini-transaction
@param[in] reset whether to reset the AUTO_INCREMENT
@@ -433,7 +435,6 @@ page_update_max_trx_id(
void
page_set_autoinc(
buf_block_t* block,
- const dict_index_t* index MY_ATTRIBUTE((unused)),
ib_uint64_t autoinc,
mtr_t* mtr,
bool reset)
@@ -517,17 +518,12 @@ page_header_set_ptr(
ulint field, /*!< in/out: PAGE_FREE, ... */
const byte* ptr); /*!< in: pointer or NULL*/
-/*************************************************************//**
-Resets the last insert info field in the page header. Writes to mlog
-about this operation. */
-UNIV_INLINE
-void
-page_header_reset_last_insert(
-/*==========================*/
- page_t* page, /*!< in: page */
- page_zip_des_t* page_zip,/*!< in/out: compressed page whose
- uncompressed part will be updated, or NULL */
- mtr_t* mtr); /*!< in: mtr */
+/**
+Reset PAGE_LAST_INSERT.
+@param[in,out] block file page
+@param[in,out] mtr mini-transaction */
+inline void page_header_reset_last_insert(buf_block_t *block, mtr_t *mtr)
+ MY_ATTRIBUTE((nonnull));
#define page_get_infimum_rec(page) ((page) + page_get_infimum_offset(page))
#define page_get_supremum_rec(page) ((page) + page_get_supremum_offset(page))
@@ -663,14 +659,17 @@ ibool
page_rec_check(
/*===========*/
const rec_t* rec); /*!< in: record */
-/***************************************************************//**
-Gets the record pointed to by a directory slot.
+/** Get the record pointed to by a directory slot.
+@param[in] slot directory slot
@return pointer to record */
-UNIV_INLINE
-const rec_t*
-page_dir_slot_get_rec(
-/*==================*/
- const page_dir_slot_t* slot); /*!< in: directory slot */
+inline rec_t *page_dir_slot_get_rec(page_dir_slot_t *slot)
+{
+ return page_align(slot) + mach_read_from_2(slot);
+}
+inline const rec_t *page_dir_slot_get_rec(const page_dir_slot_t *slot)
+{
+ return page_dir_slot_get_rec(const_cast<rec_t*>(slot));
+}
/***************************************************************//**
This is used to set the record offset in a directory slot. */
UNIV_INLINE
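Illustrative sketch (not part of this diff): reading the field through the new inline accessor and raising it through the mini-transaction; the argument order of page_update_max_trx_id() follows the call visible in the page0page.cc hunk below.

    /* read the persisted field straight from the frame */
    const trx_id_t max_id = page_get_max_trx_id(block->frame);

    /* raise it when a newer transaction modifies the page; for
    uncompressed pages this ends up in mtr->write<8>() */
    if (trx_id > max_id) {
        page_update_max_trx_id(block, buf_block_get_page_zip(block),
                               trx_id, mtr);
    }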
diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic
index d13f5732faf..ccc76c7ce3b 100644
--- a/storage/innobase/include/page0page.ic
+++ b/storage/innobase/include/page0page.ic
@@ -28,25 +28,11 @@ Created 2/2/1994 Heikki Tuuri
#define page0page_ic
#ifndef UNIV_INNOCHECKSUM
-#include "mach0data.h"
#include "rem0cmp.h"
#include "mtr0log.h"
#include "page0zip.h"
/*************************************************************//**
-Returns the max trx id field value. */
-UNIV_INLINE
-trx_id_t
-page_get_max_trx_id(
-/*================*/
- const page_t* page) /*!< in: page */
-{
- ut_ad(page);
-
- return(mach_read_from_8(page + PAGE_HEADER + PAGE_MAX_TRX_ID));
-}
-
-/*************************************************************//**
Sets the max trx id field value if trx_id is bigger than the previous
value. */
UNIV_INLINE
@@ -115,21 +101,16 @@ page_set_ssn_id(
node_seq_t ssn_id, /*!< in: transaction id */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
- page_t* page = buf_block_get_frame(block);
-
ut_ad(!mtr || mtr_memo_contains_flagged(mtr, block,
MTR_MEMO_PAGE_SX_FIX
| MTR_MEMO_PAGE_X_FIX));
- if (page_zip) {
- mach_write_to_8(page + FIL_RTREE_SPLIT_SEQ_NUM, ssn_id);
- page_zip_write_header(page_zip,
- page + FIL_RTREE_SPLIT_SEQ_NUM,
- 8, mtr);
- } else if (mtr) {
- mlog_write_ull(page + FIL_RTREE_SPLIT_SEQ_NUM, ssn_id, mtr);
+ byte* ssn = block->frame + FIL_RTREE_SPLIT_SEQ_NUM;
+ if (UNIV_LIKELY_NULL(page_zip)) {
+ mach_write_to_8(ssn, ssn_id);
+ page_zip_write_header(page_zip, ssn, 8, mtr);
} else {
- mach_write_to_8(page + FIL_RTREE_SPLIT_SEQ_NUM, ssn_id);
+ mtr->write<8,mtr_t::OPT>(*block, ssn, ssn_id);
}
}
@@ -229,30 +210,21 @@ page_header_set_ptr(
page_header_set_field(page, page_zip, field, offs);
}
-/*************************************************************//**
-Resets the last insert info field in the page header. Writes to mlog
-about this operation. */
-UNIV_INLINE
-void
-page_header_reset_last_insert(
-/*==========================*/
- page_t* page, /*!< in/out: page */
- page_zip_des_t* page_zip,/*!< in/out: compressed page whose
- uncompressed part will be updated, or NULL */
- mtr_t* mtr) /*!< in: mtr */
+/**
+Reset PAGE_LAST_INSERT.
+@param[in,out] block file page
+@param[in,out] mtr mini-transaction */
+inline void page_header_reset_last_insert(buf_block_t *block, mtr_t *mtr)
{
- ut_ad(page != NULL);
- ut_ad(mtr != NULL);
+ byte *b= &block->frame[PAGE_HEADER + PAGE_LAST_INSERT];
- if (page_zip) {
- mach_write_to_2(page + (PAGE_HEADER + PAGE_LAST_INSERT), 0);
- page_zip_write_header(page_zip,
- page + (PAGE_HEADER + PAGE_LAST_INSERT),
- 2, mtr);
- } else {
- mlog_write_ulint(page + (PAGE_HEADER + PAGE_LAST_INSERT), 0,
- MLOG_2BYTES, mtr);
- }
+ if (UNIV_LIKELY_NULL(block->page.zip.data))
+ {
+ mach_write_to_2(b, 0);
+ page_zip_write_header(&block->page.zip, b, 2, mtr);
+ }
+ else
+ mtr->write<2,mtr_t::OPT>(*block, b, 0U);
}
/***************************************************************//**
@@ -542,18 +514,6 @@ page_rec_check(
}
/***************************************************************//**
-Gets the record pointed to by a directory slot.
-@return pointer to record */
-UNIV_INLINE
-const rec_t*
-page_dir_slot_get_rec(
-/*==================*/
- const page_dir_slot_t* slot) /*!< in: directory slot */
-{
- return(page_align(slot) + mach_read_from_2(slot));
-}
-
-/***************************************************************//**
This is used to set the record offset in a directory slot. */
UNIV_INLINE
void
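Illustrative sketch (not part of this diff): callers of the reset function drop the separate page/page_zip arguments.

    /* before */
    page_header_reset_last_insert(page, page_zip, mtr);

    /* after: compressed-page handling is derived from the block */
    page_header_reset_last_insert(block, mtr);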
diff --git a/storage/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h
index 43ae66afeb5..ec445cd4d0c 100644
--- a/storage/innobase/include/trx0purge.h
+++ b/storage/innobase/include/trx0purge.h
@@ -184,15 +184,15 @@ public:
to purge */
trx_rseg_t* rseg; /*!< Rollback segment for the next undo
record to purge */
- ulint page_no; /*!< Page number for the next undo
+ uint32_t page_no; /*!< Page number for the next undo
record to purge, page number of the
log header, if dummy record */
- ulint offset; /*!< Page offset for the next undo
+ uint32_t hdr_page_no; /*!< Header page of the undo log where
+ the next record to purge belongs */
+ uint16_t offset; /*!< Page offset for the next undo
record to purge, 0 if the dummy
record */
- ulint hdr_page_no; /*!< Header page of the undo log where
- the next record to purge belongs */
- ulint hdr_offset; /*!< Header byte offset on the page */
+ uint16_t hdr_offset; /*!< Header byte offset on the page */
TrxUndoRsegsIterator
diff --git a/storage/innobase/include/trx0rseg.h b/storage/innobase/include/trx0rseg.h
index d4fdb19a988..29405997e5d 100644
--- a/storage/innobase/include/trx0rseg.h
+++ b/storage/innobase/include/trx0rseg.h
@@ -36,7 +36,7 @@ Created 3/26/1996 Heikki Tuuri
@param[in,out] mtr mini-transaction
@return rollback segment header, page x-latched */
UNIV_INLINE
-trx_rsegf_t*
+buf_block_t*
trx_rsegf_get(fil_space_t* space, ulint page_no, mtr_t* mtr);
/** Gets a newly created rollback segment header.
@@ -45,29 +45,12 @@ trx_rsegf_get(fil_space_t* space, ulint page_no, mtr_t* mtr);
@param[in,out] mtr mini-transaction
@return rollback segment header, page x-latched */
UNIV_INLINE
-trx_rsegf_t*
+buf_block_t*
trx_rsegf_get_new(
ulint space,
ulint page_no,
mtr_t* mtr);
-/***************************************************************//**
-Sets the file page number of the nth undo log slot. */
-UNIV_INLINE
-void
-trx_rsegf_set_nth_undo(
-/*===================*/
- trx_rsegf_t* rsegf, /*!< in: rollback segment header */
- ulint n, /*!< in: index of slot */
- ulint page_no,/*!< in: page number of the undo log segment */
- mtr_t* mtr); /*!< in: mtr */
-/****************************************************************//**
-Looks for a free slot for an undo log segment.
-@return slot index or ULINT_UNDEFINED if not found */
-UNIV_INLINE
-ulint
-trx_rsegf_undo_find_free(const trx_rsegf_t* rsegf);
-
/** Create a rollback segment header.
@param[in,out] space system, undo, or temporary tablespace
@param[in] rseg_id rollback segment identifier
@@ -155,10 +138,10 @@ struct trx_rseg_t {
/** Page number of the last not yet purged log header in the history
list; FIL_NULL if all list purged */
- ulint last_page_no;
+ uint32_t last_page_no;
/** Byte offset of the last not yet purged log header */
- ulint last_offset;
+ uint16_t last_offset;
/** trx_t::no * 2 + old_insert of the last not yet purged log */
trx_id_t last_commit;
@@ -255,15 +238,13 @@ If no binlog information is present, the first byte is NUL. */
/*-------------------------------------------------------------*/
/** Read the page number of an undo log slot.
-@param[in] rsegf rollback segment header
-@param[in] n slot number */
-inline
-uint32_t
-trx_rsegf_get_nth_undo(const trx_rsegf_t* rsegf, ulint n)
+@param[in] rseg_header rollback segment header
+@param[in] n slot number */
+inline uint32_t trx_rsegf_get_nth_undo(const buf_block_t *rseg_header, ulint n)
{
- ut_ad(n < TRX_RSEG_N_SLOTS);
- return mach_read_from_4(rsegf + TRX_RSEG_UNDO_SLOTS
- + n * TRX_RSEG_SLOT_SIZE);
+ ut_ad(n < TRX_RSEG_N_SLOTS);
+ return mach_read_from_4(TRX_RSEG + TRX_RSEG_UNDO_SLOTS +
+ n * TRX_RSEG_SLOT_SIZE + rseg_header->frame);
}
#ifdef WITH_WSREP
@@ -273,7 +254,7 @@ trx_rsegf_get_nth_undo(const trx_rsegf_t* rsegf, ulint n)
@param[in,out] mtr mini-transaction */
void
trx_rseg_update_wsrep_checkpoint(
- trx_rsegf_t* rseg_header,
+ buf_block_t* rseg_header,
const XID* xid,
mtr_t* mtr);
@@ -295,7 +276,7 @@ bool trx_rseg_read_wsrep_checkpoint(XID& xid);
/** Upgrade a rollback segment header page to MariaDB 10.3 format.
@param[in,out] rseg_header rollback segment header page
@param[in,out] mtr mini-transaction */
-void trx_rseg_format_upgrade(trx_rsegf_t* rseg_header, mtr_t* mtr);
+void trx_rseg_format_upgrade(buf_block_t *rseg_header, mtr_t *mtr);
/** Update the offset information about the end of the binlog entry
which corresponds to the transaction just being committed.
@@ -304,8 +285,8 @@ up to which replication has proceeded.
@param[in,out] rseg_header rollback segment header
@param[in] trx committing transaction
@param[in,out] mtr mini-transaction */
-void
-trx_rseg_update_binlog_offset(byte* rseg_header, const trx_t* trx, mtr_t* mtr);
+void trx_rseg_update_binlog_offset(buf_block_t *rseg_header, const trx_t *trx,
+ mtr_t *mtr);
#include "trx0rseg.ic"
diff --git a/storage/innobase/include/trx0rseg.ic b/storage/innobase/include/trx0rseg.ic
index 0cff8fa1f5c..e0e8c175a5d 100644
--- a/storage/innobase/include/trx0rseg.ic
+++ b/storage/innobase/include/trx0rseg.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -33,7 +33,7 @@ Created 3/26/1996 Heikki Tuuri
@param[in,out] mtr mini-transaction
@return rollback segment header, page x-latched */
UNIV_INLINE
-trx_rsegf_t*
+buf_block_t*
trx_rsegf_get(fil_space_t* space, ulint page_no, mtr_t* mtr)
{
ut_ad(space == fil_system.sys_space || space == fil_system.temp_space
@@ -44,8 +44,7 @@ trx_rsegf_get(fil_space_t* space, ulint page_no, mtr_t* mtr)
0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_RSEG_HEADER);
-
- return TRX_RSEG + block->frame;
+ return block;
}
/** Gets a newly created rollback segment header.
@@ -54,14 +53,13 @@ trx_rsegf_get(fil_space_t* space, ulint page_no, mtr_t* mtr)
@param[in,out] mtr mini-transaction
@return rollback segment header, page x-latched */
UNIV_INLINE
-trx_rsegf_t*
+buf_block_t*
trx_rsegf_get_new(
ulint space,
ulint page_no,
mtr_t* mtr)
{
buf_block_t* block;
- trx_rsegf_t* header;
ut_ad(space <= srv_undo_tablespaces_active || space == SRV_TMP_SPACE_ID
|| !srv_was_started);
@@ -70,54 +68,5 @@ trx_rsegf_get_new(
block = buf_page_get(page_id_t(space, page_no), 0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_RSEG_HEADER_NEW);
-
- header = TRX_RSEG + buf_block_get_frame(block);
-
- return(header);
-}
-
-/***************************************************************//**
-Sets the file page number of the nth undo log slot. */
-UNIV_INLINE
-void
-trx_rsegf_set_nth_undo(
-/*===================*/
- trx_rsegf_t* rsegf, /*!< in: rollback segment header */
- ulint n, /*!< in: index of slot */
- ulint page_no,/*!< in: page number of the undo log segment */
- mtr_t* mtr) /*!< in: mtr */
-{
- ut_a(n < TRX_RSEG_N_SLOTS);
-
- mlog_write_ulint(rsegf + TRX_RSEG_UNDO_SLOTS + n * TRX_RSEG_SLOT_SIZE,
- page_no, MLOG_4BYTES, mtr);
-}
-
-/****************************************************************//**
-Looks for a free slot for an undo log segment.
-@return slot index or ULINT_UNDEFINED if not found */
-UNIV_INLINE
-ulint
-trx_rsegf_undo_find_free(const trx_rsegf_t* rsegf)
-{
- ulint i;
- ulint page_no;
- ulint max_slots = TRX_RSEG_N_SLOTS;
-
-#ifdef UNIV_DEBUG
- if (trx_rseg_n_slots_debug) {
- max_slots = ut_min(static_cast<ulint>(trx_rseg_n_slots_debug),
- static_cast<ulint>(TRX_RSEG_N_SLOTS));
- }
-#endif
-
- for (i = 0; i < max_slots; i++) {
- page_no = trx_rsegf_get_nth_undo(rsegf, i);
-
- if (page_no == FIL_NULL) {
- return(i);
- }
- }
-
- return(ULINT_UNDEFINED);
+ return block;
}
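Illustrative sketch (not part of this diff): the removed slot helpers reduce to a loop over trx_rsegf_get_nth_undo() and a 4-byte mini-transaction write at the callers; the helper name below is made up, and the UNIV_DEBUG trx_rseg_n_slots_debug cap is omitted.

    /* hypothetical replacement for trx_rsegf_undo_find_free() */
    static ulint rseg_undo_find_free(const buf_block_t* rseg_header)
    {
        for (ulint i = 0; i < TRX_RSEG_N_SLOTS; i++) {
            if (trx_rsegf_get_nth_undo(rseg_header, i) == FIL_NULL) {
                return i;
            }
        }
        return ULINT_UNDEFINED;
    }

    /* assigning a page number to slot n,
    formerly trx_rsegf_set_nth_undo() */
    mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_UNDO_SLOTS
                  + n * TRX_RSEG_SLOT_SIZE + rseg_header->frame, page_no);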
diff --git a/storage/innobase/include/trx0types.h b/storage/innobase/include/trx0types.h
index 2aaec580d65..83d6d2c0db2 100644
--- a/storage/innobase/include/trx0types.h
+++ b/storage/innobase/include/trx0types.h
@@ -121,8 +121,6 @@ struct trx_savept_t{
/** File objects */
/* @{ */
-/** Rollback segment header */
-typedef byte trx_rsegf_t;
/** Undo segment header */
typedef byte trx_usegf_t;
/** Undo log header */
diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h
index ce92e5de5e1..8dec56a5e7d 100644
--- a/storage/innobase/include/trx0undo.h
+++ b/storage/innobase/include/trx0undo.h
@@ -46,10 +46,10 @@ UNIV_INLINE
roll_ptr_t
trx_undo_build_roll_ptr(
/*====================*/
- ibool is_insert, /*!< in: TRUE if insert undo log */
+ bool is_insert, /*!< in: TRUE if insert undo log */
ulint rseg_id, /*!< in: rollback segment id */
- ulint page_no, /*!< in: page number */
- ulint offset); /*!< in: offset of the undo entry within page */
+ uint32_t page_no, /*!< in: page number */
+ uint16_t offset); /*!< in: offset of the undo entry within page */
/***********************************************************************//**
Decodes a roll pointer. */
UNIV_INLINE
@@ -57,16 +57,16 @@ void
trx_undo_decode_roll_ptr(
/*=====================*/
roll_ptr_t roll_ptr, /*!< in: roll pointer */
- ibool* is_insert, /*!< out: TRUE if insert undo log */
+ bool* is_insert, /*!< out: TRUE if insert undo log */
ulint* rseg_id, /*!< out: rollback segment id */
- ulint* page_no, /*!< out: page number */
- ulint* offset); /*!< out: offset of the undo
+ uint32_t* page_no, /*!< out: page number */
+ uint16_t* offset); /*!< out: offset of the undo
entry within page */
/***********************************************************************//**
-Returns TRUE if the roll pointer is of the insert type.
-@return TRUE if insert undo log */
+Determine if DB_ROLL_PTR is of the insert type.
+@return true if insert */
UNIV_INLINE
-ibool
+bool
trx_undo_roll_ptr_is_insert(
/*========================*/
roll_ptr_t roll_ptr); /*!< in: roll pointer */
@@ -101,7 +101,7 @@ inline roll_ptr_t trx_read_roll_ptr(const byte* ptr)
@param[in,out] mtr mini-transaction
@return pointer to page x-latched */
UNIV_INLINE
-page_t*
+buf_block_t*
trx_undo_page_get(const page_id_t page_id, mtr_t* mtr);
/** Gets an undo log page and s-latches it.
@@ -109,56 +109,52 @@ trx_undo_page_get(const page_id_t page_id, mtr_t* mtr);
@param[in,out] mtr mini-transaction
@return pointer to page s-latched */
UNIV_INLINE
-page_t*
+buf_block_t*
trx_undo_page_get_s_latched(const page_id_t page_id, mtr_t* mtr);
-/******************************************************************//**
-Returns the next undo log record on the page in the specified log, or
-NULL if none exists.
-@return pointer to record, NULL if none */
-UNIV_INLINE
-trx_undo_rec_t*
-trx_undo_page_get_next_rec(
-/*=======================*/
- trx_undo_rec_t* rec, /*!< in: undo log record */
- ulint page_no,/*!< in: undo log header page number */
- ulint offset);/*!< in: undo log header offset on page */
-/***********************************************************************//**
-Gets the previous record in an undo log.
-@return undo log record, the page s-latched, NULL if none */
+/** Get the next record in an undo log.
+@param[in] undo_page undo log page
+@param[in] rec undo record offset in the page
+@param[in] page_no undo log header page number
+@param[in] offset undo log header offset on page
+@return undo log record, the page latched, NULL if none */
+inline trx_undo_rec_t*
+trx_undo_page_get_next_rec(const buf_block_t *undo_page, uint16_t rec,
+ uint32_t page_no, uint16_t offset);
+/** Get the previous record in an undo log.
+@param[in,out] block undo log page
+@param[in] rec undo record offset in the page
+@param[in] page_no undo log header page number
+@param[in] offset undo log header offset on page
+@param[in] shared latching mode: true=RW_S_LATCH, false=RW_X_LATCH
+@param[in,out] mtr mini-transaction
+@return undo log record, the page latched, NULL if none */
trx_undo_rec_t*
-trx_undo_get_prev_rec(
-/*==================*/
- trx_undo_rec_t* rec, /*!< in: undo record */
- ulint page_no,/*!< in: undo log header page number */
- ulint offset, /*!< in: undo log header offset on page */
- bool shared, /*!< in: true=S-latch, false=X-latch */
- mtr_t* mtr); /*!< in: mtr */
-/***********************************************************************//**
-Gets the next record in an undo log.
-@return undo log record, the page s-latched, NULL if none */
+trx_undo_get_prev_rec(buf_block_t *&block, uint16_t rec, uint32_t page_no,
+ uint16_t offset, bool shared, mtr_t *mtr);
+/** Get the next record in an undo log.
+@param[in,out] block undo log page
+@param[in] rec undo record offset in the page
+@param[in] page_no undo log header page number
+@param[in] offset undo log header offset on page
+@param[in,out] mtr mini-transaction
+@return undo log record, the page latched, NULL if none */
trx_undo_rec_t*
-trx_undo_get_next_rec(
-/*==================*/
- trx_undo_rec_t* rec, /*!< in: undo record */
- ulint page_no,/*!< in: undo log header page number */
- ulint offset, /*!< in: undo log header offset on page */
- mtr_t* mtr); /*!< in: mtr */
-
-/** Gets the first record in an undo log.
-@param[in] space undo log header space
-@param[in] page_no undo log header page number
-@param[in] offset undo log header offset on page
-@param[in] mode latching mode: RW_S_LATCH or RW_X_LATCH
-@param[in,out] mtr mini-transaction
+trx_undo_get_next_rec(buf_block_t *&block, uint16_t rec, uint32_t page_no,
+ uint16_t offset, mtr_t *mtr);
+
+/** Get the first record in an undo log.
+@param[in] space undo log header space
+@param[in] page_no undo log header page number
+@param[in] offset undo log header offset on page
+@param[in] mode latching mode: RW_S_LATCH or RW_X_LATCH
+@param[out] block undo log page
+@param[in,out] mtr mini-transaction
@return undo log record, the page latched, NULL if none */
trx_undo_rec_t*
-trx_undo_get_first_rec(
- fil_space_t* space,
- ulint page_no,
- ulint offset,
- ulint mode,
- mtr_t* mtr);
+trx_undo_get_first_rec(const fil_space_t &space, uint32_t page_no,
+ uint16_t offset, ulint mode, buf_block_t*& block,
+ mtr_t *mtr);
/** Allocate an undo log page.
@param[in,out] undo undo log
@@ -193,8 +189,8 @@ freed, but emptied, if all the records there are below the limit.
void
trx_undo_truncate_start(
trx_rseg_t* rseg,
- ulint hdr_page_no,
- ulint hdr_offset,
+ uint32_t hdr_page_no,
+ uint16_t hdr_offset,
undo_no_t limit);
/** Mark that an undo log header belongs to a data dictionary transaction.
@param[in] trx dictionary transaction
@@ -227,7 +223,7 @@ trx_undo_assign_low(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
/******************************************************************//**
Sets the state of the undo log segment at a transaction finish.
@return undo log segment header page, x-latched */
-page_t*
+buf_block_t*
trx_undo_set_state_at_finish(
/*=========================*/
trx_undo_t* undo, /*!< in: undo log memory copy */
@@ -237,14 +233,10 @@ trx_undo_set_state_at_finish(
@param[in,out] trx transaction
@param[in,out] undo undo log
@param[in] rollback false=XA PREPARE, true=XA ROLLBACK
-@param[in,out] mtr mini-transaction
-@return undo log segment header page, x-latched */
-page_t*
-trx_undo_set_state_at_prepare(
- trx_t* trx,
- trx_undo_t* undo,
- bool rollback,
- mtr_t* mtr);
+@param[in,out] mtr mini-transaction */
+void trx_undo_set_state_at_prepare(trx_t *trx, trx_undo_t *undo, bool rollback,
+ mtr_t *mtr)
+ MY_ATTRIBUTE((nonnull));
/** Free an old insert or temporary undo log after commit or rollback.
The information is not needed after a commit or rollback, therefore
@@ -281,14 +273,14 @@ trx_undo_parse_page_header_reuse(
/** Parse the redo log entry of an undo log page header create.
@param[in] ptr redo log record
@param[in] end_ptr end of log buffer
-@param[in,out] page page frame or NULL
+@param[in,out] block page frame or NULL
@param[in,out] mtr mini-transaction or NULL
@return end of log record or NULL */
byte*
trx_undo_parse_page_header(
const byte* ptr,
const byte* end_ptr,
- page_t* page,
+ buf_block_t* block,
mtr_t* mtr);
/** Read an undo log when starting up the database.
@param[in,out] rseg rollback segment
@@ -296,9 +288,9 @@ trx_undo_parse_page_header(
@param[in] page_no undo log segment page number
@param[in,out] max_trx_id the largest observed transaction ID
@return size of the undo log in pages */
-ulint
-trx_undo_mem_create_at_db_start(trx_rseg_t* rseg, ulint id, ulint page_no,
- trx_id_t& max_trx_id);
+uint32_t
+trx_undo_mem_create_at_db_start(trx_rseg_t *rseg, ulint id, uint32_t page_no,
+ trx_id_t &max_trx_id);
#endif /* !UNIV_INNOCHECKSUM */
@@ -340,20 +332,20 @@ struct trx_undo_t {
id */
trx_rseg_t* rseg; /*!< rseg where the undo log belongs */
/*-----------------------------*/
- ulint hdr_page_no; /*!< page number of the header page in
+ uint32_t hdr_page_no; /*!< page number of the header page in
the undo log */
- ulint hdr_offset; /*!< header offset of the undo log on
- the page */
- ulint last_page_no; /*!< page number of the last page in the
+ uint32_t last_page_no; /*!< page number of the last page in the
undo log; this may differ from
top_page_no during a rollback */
- ulint size; /*!< current size in pages */
+ uint16_t hdr_offset; /*!< header offset of the undo log on
+ the page */
+ uint32_t size; /*!< current size in pages */
/*-----------------------------*/
- ulint top_page_no; /*!< page number where the latest undo
+ uint32_t top_page_no; /*!< page number where the latest undo
log record was catenated; during
rollback the page from which the latest
undo record was chosen */
- ulint top_offset; /*!< offset of the latest undo record,
+ uint16_t top_offset; /*!< offset of the latest undo record,
i.e., the topmost element in the undo
log if we think of it as a stack */
undo_no_t top_undo_no; /*!< undo number of the latest record
diff --git a/storage/innobase/include/trx0undo.ic b/storage/innobase/include/trx0undo.ic
index 6d1ec16869e..06e31eb55b3 100644
--- a/storage/innobase/include/trx0undo.ic
+++ b/storage/innobase/include/trx0undo.ic
@@ -34,22 +34,17 @@ UNIV_INLINE
roll_ptr_t
trx_undo_build_roll_ptr(
/*====================*/
- ibool is_insert, /*!< in: TRUE if insert undo log */
+ bool is_insert, /*!< in: TRUE if insert undo log */
ulint rseg_id, /*!< in: rollback segment id */
- ulint page_no, /*!< in: page number */
- ulint offset) /*!< in: offset of the undo entry within page */
+ uint32_t page_no, /*!< in: page number */
+ uint16_t offset) /*!< in: offset of the undo entry within page */
{
- roll_ptr_t roll_ptr;
- compile_time_assert(DATA_ROLL_PTR_LEN == 7);
- ut_ad(is_insert == 0 || is_insert == 1);
- ut_ad(rseg_id < TRX_SYS_N_RSEGS);
- ut_ad(offset < 65536);
-
- roll_ptr = (roll_ptr_t) is_insert << ROLL_PTR_INSERT_FLAG_POS
- | (roll_ptr_t) rseg_id << ROLL_PTR_RSEG_ID_POS
- | (roll_ptr_t) page_no << ROLL_PTR_PAGE_POS
- | offset;
- return(roll_ptr);
+ compile_time_assert(DATA_ROLL_PTR_LEN == 7);
+ ut_ad(rseg_id < TRX_SYS_N_RSEGS);
+
+ return roll_ptr_t{is_insert} << ROLL_PTR_INSERT_FLAG_POS |
+ roll_ptr_t{rseg_id} << ROLL_PTR_RSEG_ID_POS |
+ roll_ptr_t{page_no} << ROLL_PTR_PAGE_POS | offset;
}
/***********************************************************************//**
@@ -59,35 +54,32 @@ void
trx_undo_decode_roll_ptr(
/*=====================*/
roll_ptr_t roll_ptr, /*!< in: roll pointer */
- ibool* is_insert, /*!< out: TRUE if insert undo log */
+ bool* is_insert, /*!< out: TRUE if insert undo log */
ulint* rseg_id, /*!< out: rollback segment id */
- ulint* page_no, /*!< out: page number */
- ulint* offset) /*!< out: offset of the undo
+ uint32_t* page_no, /*!< out: page number */
+ uint16_t* offset) /*!< out: offset of the undo
entry within page */
{
- compile_time_assert(DATA_ROLL_PTR_LEN == 7);
- ut_ad(roll_ptr < (1ULL << 56));
- *offset = (ulint) roll_ptr & 0xFFFF;
- roll_ptr >>= 16;
- *page_no = (ulint) roll_ptr & 0xFFFFFFFF;
- roll_ptr >>= 32;
- *rseg_id = (ulint) roll_ptr & 0x7F;
- roll_ptr >>= 7;
- *is_insert = (ibool) roll_ptr; /* TRUE==1 */
+ compile_time_assert(DATA_ROLL_PTR_LEN == 7);
+ ut_ad(roll_ptr < (1ULL << 56));
+ *offset= static_cast<uint16_t>(roll_ptr);
+ *page_no= static_cast<uint32_t>(roll_ptr >> 16);
+ *rseg_id= static_cast<ulint>(roll_ptr >> 48 & 0x7F);
+ *is_insert= static_cast<bool>(roll_ptr >> 55);
}
/***********************************************************************//**
-Returns TRUE if the roll pointer is of the insert type.
-@return TRUE if insert undo log */
+Determine if DB_ROLL_PTR is of the insert type.
+@return true if insert */
UNIV_INLINE
-ibool
+bool
trx_undo_roll_ptr_is_insert(
/*========================*/
roll_ptr_t roll_ptr) /*!< in: roll pointer */
{
compile_time_assert(DATA_ROLL_PTR_LEN == 7);
ut_ad(roll_ptr < (1ULL << (ROLL_PTR_INSERT_FLAG_POS + 1)));
- return((ibool) (roll_ptr >> ROLL_PTR_INSERT_FLAG_POS));
+ return static_cast<bool>(roll_ptr >> ROLL_PTR_INSERT_FLAG_POS);
}
/***********************************************************************//**
@@ -108,14 +100,13 @@ trx_undo_trx_id_is_insert(
@param[in,out] mtr mini-transaction
@return pointer to page x-latched */
UNIV_INLINE
-page_t*
+buf_block_t*
trx_undo_page_get(const page_id_t page_id, mtr_t* mtr)
{
buf_block_t* block = buf_page_get(page_id, 0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
-
- return(buf_block_get_frame(block));
+ return block;
}
/** Gets an undo log page and s-latches it.
@@ -123,14 +114,14 @@ trx_undo_page_get(const page_id_t page_id, mtr_t* mtr)
@param[in,out] mtr mini-transaction
@return pointer to page s-latched */
UNIV_INLINE
-page_t*
+buf_block_t*
trx_undo_page_get_s_latched(const page_id_t page_id, mtr_t* mtr)
{
buf_block_t* block = buf_page_get(page_id, 0, RW_S_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
- return(buf_block_get_frame(block));
+ return block;
}
/** Determine the end offset of undo log records of an undo log page.
@@ -139,46 +130,29 @@ trx_undo_page_get_s_latched(const page_id_t page_id, mtr_t* mtr)
@param[in] offset undo log header offset
@return end offset */
inline
-uint16_t
-trx_undo_page_get_end(const page_t* undo_page, ulint page_no, ulint offset)
+uint16_t trx_undo_page_get_end(const buf_block_t *undo_page, uint32_t page_no,
+ uint16_t offset)
{
- if (page_no == page_get_page_no(undo_page)) {
- if (uint16_t end = mach_read_from_2(TRX_UNDO_NEXT_LOG
- + offset + undo_page)) {
- return end;
- }
- }
-
- return mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
- + undo_page);
+ if (page_no == undo_page->page.id.page_no())
+ if (uint16_t end = mach_read_from_2(TRX_UNDO_NEXT_LOG + offset +
+ undo_page->frame))
+ return end;
+
+ return mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE +
+ undo_page->frame);
}
-/******************************************************************//**
-Returns the next undo log record on the page in the specified log, or
-NULL if none exists.
-@return pointer to record, NULL if none */
-UNIV_INLINE
-trx_undo_rec_t*
-trx_undo_page_get_next_rec(
-/*=======================*/
- trx_undo_rec_t* rec, /*!< in: undo log record */
- ulint page_no,/*!< in: undo log header page number */
- ulint offset) /*!< in: undo log header offset on page */
+/** Get the next record in an undo log.
+@param[in] undo_page undo log page
+@param[in] rec undo record offset in the page
+@param[in] page_no undo log header page number
+@param[in] offset undo log header offset on page
+@return undo log record, the page latched, NULL if none */
+inline trx_undo_rec_t*
+trx_undo_page_get_next_rec(const buf_block_t *undo_page, uint16_t rec,
+ uint32_t page_no, uint16_t offset)
{
- page_t* undo_page;
- ulint end;
- ulint next;
-
- undo_page = (page_t*) ut_align_down(rec, srv_page_size);
-
- end = trx_undo_page_get_end(undo_page, page_no, offset);
-
- next = mach_read_from_2(rec);
-
- if (next == end) {
-
- return(NULL);
- }
-
- return(undo_page + next);
+ uint16_t end= trx_undo_page_get_end(undo_page, page_no, offset);
+ uint16_t next= mach_read_from_2(undo_page->frame + rec);
+ return next == end ? nullptr : undo_page->frame + next;
}
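Illustrative sketch (not part of this diff): a round trip through the retyped roll pointer helpers, with arbitrary values.

    const uint32_t page_no = 3;
    const uint16_t offset  = 272;
    const ulint    rseg_id = 1;

    roll_ptr_t roll_ptr = trx_undo_build_roll_ptr(false, rseg_id,
                                                  page_no, offset);

    bool     is_insert;
    ulint    id;
    uint32_t page;
    uint16_t ofs;
    trx_undo_decode_roll_ptr(roll_ptr, &is_insert, &id, &page, &ofs);

    ut_ad(!is_insert);
    ut_ad(id == rseg_id && page == page_no && ofs == offset);
    ut_ad(!trx_undo_roll_ptr_is_insert(roll_ptr));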
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index 9a0abb8853c..958ea15f8ef 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -62,7 +62,7 @@ General philosophy of InnoDB redo-logs:
through mtr, which in mtr_commit() writes log records
to the InnoDB redo log.
-2) Normally these changes are performed using a mlog_write_ulint()
+2) Normally these changes are performed using a mlog_write()
or similar function.
3) In some page level operations only a code number of a
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index 96dbcf69e59..c606dea7f9f 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -1619,7 +1619,7 @@ parse_log:
break;
case MLOG_UNDO_HDR_CREATE:
ut_ad(!page || page_type == FIL_PAGE_UNDO_LOG);
- ptr = trx_undo_parse_page_header(ptr, end_ptr, page, mtr);
+ ptr = trx_undo_parse_page_header(ptr, end_ptr, block, mtr);
break;
case MLOG_REC_MIN_MARK: case MLOG_COMP_REC_MIN_MARK:
ut_ad(!page || fil_page_type_is_index(page_type));
diff --git a/storage/innobase/mtr/mtr0log.cc b/storage/innobase/mtr/mtr0log.cc
index bd56ec67394..16dad7e0cad 100644
--- a/storage/innobase/mtr/mtr0log.cc
+++ b/storage/innobase/mtr/mtr0log.cc
@@ -90,7 +90,7 @@ mlog_parse_initial_log_record(
}
/********************************************************//**
-Parses a log record written by mlog_write_ulint, mlog_write_ull, mlog_memset.
+Parses a log record written by mtr_t::write(), mlog_memset().
@return parsed record end, NULL if not a complete record or a corrupt record */
const byte*
mlog_parse_nbytes(
@@ -213,80 +213,58 @@ mlog_parse_nbytes(
return const_cast<byte*>(ptr);
}
-/********************************************************//**
-Writes 1, 2 or 4 bytes to a file page. Writes the corresponding log
-record to the mini-transaction log if mtr is not NULL. */
-void
-mlog_write_ulint(
-/*=============*/
- byte* ptr, /*!< in: pointer where to write */
- ulint val, /*!< in: value to write */
- mlog_id_t type, /*!< in: MLOG_1BYTE, MLOG_2BYTES, MLOG_4BYTES */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+/**
+Write a log record for writing 1, 2, 4, or 8 bytes.
+@param[in] block file page
+@param[in,out] ptr pointer in file page
+@param[in] l number of bytes to write
+@param[in,out] log_ptr log record buffer
+@param[in,out] mtr mini-transaction */
+static byte *
+mlog_log_write_low(const buf_block_t &block, byte *ptr, mlog_id_t l,
+ byte *log_ptr, mtr_t &mtr)
{
- switch (type) {
- case MLOG_1BYTE:
- mach_write_to_1(ptr, val);
- break;
- case MLOG_2BYTES:
- mach_write_to_2(ptr, val);
- break;
- case MLOG_4BYTES:
- mach_write_to_4(ptr, val);
- break;
- default:
- ut_error;
- }
-
- if (mtr != 0) {
- byte* log_ptr = mlog_open(mtr, 11 + 2 + 5);
-
- /* If no logging is requested, we may return now */
-
- if (log_ptr != 0) {
-
- log_ptr = mlog_write_initial_log_record_fast(
- ptr, type, log_ptr, mtr);
-
- mach_write_to_2(log_ptr, page_offset(ptr));
- log_ptr += 2;
-
- log_ptr += mach_write_compressed(log_ptr, val);
-
- mlog_close(mtr, log_ptr);
- }
- }
+ ut_ad(block.page.state == BUF_BLOCK_FILE_PAGE);
+ ut_ad(ptr >= block.frame + FIL_PAGE_OFFSET);
+ ut_ad(ptr + unsigned(l) <= &block.frame[srv_page_size - FIL_PAGE_DATA_END]);
+ log_ptr= mlog_write_initial_log_record_low(l,
+ block.page.id.space(),
+ block.page.id.page_no(),
+ log_ptr, &mtr);
+ mach_write_to_2(log_ptr, page_offset(ptr));
+ return log_ptr + 2;
}
-/********************************************************//**
-Writes 8 bytes to a file page. Writes the corresponding log
-record to the mini-transaction log, only if mtr is not NULL */
-void
-mlog_write_ull(
-/*===========*/
- byte* ptr, /*!< in: pointer where to write */
- ib_uint64_t val, /*!< in: value to write */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+/**
+Write a log record for writing 1, 2, or 4 bytes.
+@param[in] block file page
+@param[in,out] ptr pointer in file page
+@param[in] l number of bytes to write
+@param[in,out] log_ptr log record buffer
+@param[in] val value to write */
+void mtr_t::log_write(const buf_block_t &block, byte *ptr, mlog_id_t l,
+ byte *log_ptr, uint32_t val)
{
- mach_write_to_8(ptr, val);
-
- if (mtr != 0) {
- byte* log_ptr = mlog_open(mtr, 11 + 2 + 9);
-
- /* If no logging is requested, we may return now */
- if (log_ptr != 0) {
-
- log_ptr = mlog_write_initial_log_record_fast(
- ptr, MLOG_8BYTES, log_ptr, mtr);
-
- mach_write_to_2(log_ptr, page_offset(ptr));
- log_ptr += 2;
-
- log_ptr += mach_u64_write_compressed(log_ptr, val);
+ ut_ad(l == MLOG_1BYTE || l == MLOG_2BYTES || l == MLOG_4BYTES);
+ log_ptr= mlog_log_write_low(block, ptr, l, log_ptr, *this);
+ log_ptr+= mach_write_compressed(log_ptr, val);
+ mlog_close(this, log_ptr);
+}
- mlog_close(mtr, log_ptr);
- }
- }
+/**
+Write a log record for writing 8 bytes.
+@param[in] block file page
+@param[in,out] ptr pointer in file page
+@param[in] l number of bytes to write
+@param[in,out] log_ptr log record buffer
+@param[in] val value to write */
+void mtr_t::log_write(const buf_block_t &block, byte *ptr, mlog_id_t l,
+ byte *log_ptr, uint64_t val)
+{
+ ut_ad(l == MLOG_8BYTES);
+ log_ptr= mlog_log_write_low(block, ptr, l, log_ptr, *this);
+ log_ptr+= mach_u64_write_compressed(log_ptr, val);
+ mlog_close(this, log_ptr);
}
/********************************************************//**
@@ -402,7 +380,7 @@ mlog_parse_string(
@param[in] val the data byte to write
@param[in,out] mtr mini-transaction */
void
-mlog_memset(buf_block_t* b, ulint ofs, ulint len, byte val, mtr_t* mtr)
+mlog_memset(const buf_block_t* b, ulint ofs, ulint len, byte val, mtr_t* mtr)
{
ut_ad(len);
ut_ad(ofs <= ulint(srv_page_size));
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index a6e1e5e5b8a..870c9777f9b 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -196,24 +196,19 @@ page_set_max_trx_id(
trx_id_t trx_id, /*!< in: transaction id */
mtr_t* mtr) /*!< in/out: mini-transaction, or NULL */
{
- page_t* page = buf_block_get_frame(block);
- ut_ad(!mtr || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
-
- /* It is not necessary to write this change to the redo log, as
- during a database recovery we assume that the max trx id of every
- page is the maximum trx id assigned before the crash. */
-
- if (page_zip) {
- mach_write_to_8(page + (PAGE_HEADER + PAGE_MAX_TRX_ID), trx_id);
- page_zip_write_header(page_zip,
- page + (PAGE_HEADER + PAGE_MAX_TRX_ID),
- 8, mtr);
- } else if (mtr) {
- mlog_write_ull(page + (PAGE_HEADER + PAGE_MAX_TRX_ID),
- trx_id, mtr);
- } else {
- mach_write_to_8(page + (PAGE_HEADER + PAGE_MAX_TRX_ID), trx_id);
- }
+ ut_ad(!mtr || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
+ static_assert((PAGE_HEADER + PAGE_MAX_TRX_ID) % 8 == 0, "alignment");
+ byte *max_trx_id= static_cast<byte*>(MY_ASSUME_ALIGNED(PAGE_MAX_TRX_ID
+ + PAGE_HEADER
+ + block->frame, 8));
+
+ if (UNIV_LIKELY_NULL(page_zip))
+ {
+ mach_write_to_8(max_trx_id, trx_id);
+ page_zip_write_header(page_zip, max_trx_id, 8, mtr);
+ }
+ else
+ mtr->write<8>(*block, max_trx_id, trx_id);
}
/** Persist the AUTO_INCREMENT value on a clustered index root page.
@@ -227,27 +222,23 @@ page_set_max_trx_id(
void
page_set_autoinc(
buf_block_t* block,
- const dict_index_t* index MY_ATTRIBUTE((unused)),
ib_uint64_t autoinc,
mtr_t* mtr,
bool reset)
{
- ut_ad(mtr_memo_contains_flagged(
- mtr, block, MTR_MEMO_PAGE_X_FIX | MTR_MEMO_PAGE_SX_FIX));
- ut_ad(index->is_primary());
- ut_ad(index->page == block->page.id.page_no());
- ut_ad(index->table->space_id == block->page.id.space());
-
- byte* field = PAGE_HEADER + PAGE_ROOT_AUTO_INC
- + buf_block_get_frame(block);
- if (!reset && mach_read_from_8(field) >= autoinc) {
- /* nothing to update */
- } else if (page_zip_des_t* page_zip = buf_block_get_page_zip(block)) {
- mach_write_to_8(field, autoinc);
- page_zip_write_header(page_zip, field, 8, mtr);
- } else {
- mlog_write_ull(field, autoinc, mtr);
- }
+ ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX |
+ MTR_MEMO_PAGE_SX_FIX));
+
+ byte *field= PAGE_HEADER + PAGE_ROOT_AUTO_INC + block->frame;
+ if (!reset && mach_read_from_8(field) >= autoinc)
+ /* nothing to update */;
+ else if (page_zip_des_t* page_zip = buf_block_get_page_zip(block))
+ {
+ mach_write_to_8(field, autoinc);
+ page_zip_write_header(page_zip, field, 8, mtr);
+ }
+ else
+ mtr->write<8,mtr_t::OPT>(*block, field, autoinc);
}
/** The page infimum and supremum of an empty page in ROW_FORMAT=REDUNDANT */
@@ -483,12 +474,12 @@ page_create_empty(
page_header_get_field(page, PAGE_LEVEL),
max_trx_id, mtr);
} else {
- page_create(block, mtr, page_is_comp(page),
- dict_index_is_spatial(index));
+ page_create(block, mtr, index->table->not_redundant(),
+ index->is_spatial());
if (max_trx_id) {
- mlog_write_ull(PAGE_HEADER + PAGE_MAX_TRX_ID + page,
- max_trx_id, mtr);
+ mtr->write<8>(*block, PAGE_HEADER + PAGE_MAX_TRX_ID
+ + block->frame, max_trx_id);
}
}
}
@@ -581,12 +572,13 @@ page_copy_rec_list_end(
{
page_t* new_page = buf_block_get_frame(new_block);
page_zip_des_t* new_page_zip = buf_block_get_page_zip(new_block);
- page_t* page = page_align(rec);
+ page_t* page = block->frame;
rec_t* ret = page_rec_get_next(
page_get_infimum_rec(new_page));
ulint num_moved = 0;
rtr_rec_move_t* rec_move = NULL;
mem_heap_t* heap = NULL;
+ ut_ad(page_align(rec) == page);
#ifdef UNIV_ZIP_DEBUG
if (new_page_zip) {
@@ -810,8 +802,9 @@ page_copy_rec_list_start(
for MVCC. */
if (is_leaf && dict_index_is_sec_or_ibuf(index)
&& !index->table->is_temporary()) {
- page_update_max_trx_id(new_block, NULL,
- page_get_max_trx_id(page_align(rec)),
+ page_update_max_trx_id(new_block,
+ new_page_zip,
+ page_get_max_trx_id(block->frame),
mtr);
}
@@ -979,7 +972,6 @@ page_delete_rec_list_end(
rec_t* prev_rec;
ulint n_owned;
page_zip_des_t* page_zip = buf_block_get_page_zip(block);
- page_t* page = page_align(rec);
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
@@ -987,8 +979,9 @@ page_delete_rec_list_end(
ut_ad(size == ULINT_UNDEFINED || size < srv_page_size);
ut_ad(!page_zip || page_rec_is_comp(rec));
+ ut_ad(page_align(rec) == block->frame);
#ifdef UNIV_ZIP_DEBUG
- ut_a(!page_zip || page_zip_validate(page_zip, page, index));
+ ut_a(!page_zip || page_zip_validate(page_zip, block->frame, index));
#endif /* UNIV_ZIP_DEBUG */
if (page_rec_is_supremum(rec)) {
@@ -1006,19 +999,21 @@ page_delete_rec_list_end(
only be executed when applying redo log that was
generated by an older version of MySQL. */
} else if (page_rec_is_infimum(rec)
- || n_recs == page_get_n_recs(page)) {
+ || n_recs == page_get_n_recs(block->frame)) {
delete_all:
/* We are deleting all records. */
page_create_empty(block, index, mtr);
return;
- } else if (page_is_comp(page)) {
- if (page_rec_get_next_low(page + PAGE_NEW_INFIMUM, 1) == rec) {
+ } else if (page_is_comp(block->frame)) {
+ if (page_rec_get_next_low(block->frame + PAGE_NEW_INFIMUM, 1)
+ == rec) {
/* We are deleting everything from the first
user record onwards. */
goto delete_all;
}
} else {
- if (page_rec_get_next_low(page + PAGE_OLD_INFIMUM, 0) == rec) {
+ if (page_rec_get_next_low(block->frame + PAGE_OLD_INFIMUM, 0)
+ == rec) {
/* We are deleting everything from the first
user record onwards. */
goto delete_all;
@@ -1028,23 +1023,23 @@ delete_all:
/* Reset the last insert info in the page header and increment
the modify clock for the frame */
- page_header_set_ptr(page, page_zip, PAGE_LAST_INSERT, NULL);
+ page_header_set_ptr(block->frame, page_zip, PAGE_LAST_INSERT, NULL);
/* The page gets invalid for optimistic searches: increment the
frame modify clock */
buf_block_modify_clock_inc(block);
- page_delete_rec_list_write_log(rec, index, page_is_comp(page)
+ page_delete_rec_list_write_log(rec, index, page_is_comp(block->frame)
? MLOG_COMP_LIST_END_DELETE
: MLOG_LIST_END_DELETE, mtr);
- const bool is_leaf = page_is_leaf(page);
+ const bool is_leaf = page_is_leaf(block->frame);
if (page_zip) {
mtr_log_t log_mode;
- ut_a(page_is_comp(page));
+ ut_ad(page_is_comp(block->frame));
/* Individual deletes are not logged */
log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE);
@@ -1057,7 +1052,7 @@ delete_all:
ULINT_UNDEFINED, &heap);
rec = rec_get_next_ptr(rec, TRUE);
#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page, index));
+ ut_a(page_zip_validate(page_zip, block->frame, index));
#endif /* UNIV_ZIP_DEBUG */
page_cur_delete_rec(&cur, index, offsets, mtr);
} while (page_offset(rec) != PAGE_NEW_SUPREMUM);
@@ -1074,7 +1069,7 @@ delete_all:
prev_rec = page_rec_get_prev(rec);
- last_rec = page_rec_get_prev(page_get_supremum_rec(page));
+ last_rec = page_rec_get_prev(page_get_supremum_rec(block->frame));
bool scrub = srv_immediate_scrub_data_uncompressed;
if ((size == ULINT_UNDEFINED) || (n_recs == ULINT_UNDEFINED) ||
@@ -1090,7 +1085,7 @@ delete_all:
is_leaf,
ULINT_UNDEFINED, &heap);
s = rec_offs_size(offsets);
- ut_ad(ulint(rec2 - page) + s
+ ut_ad(ulint(rec2 - block->frame) + s
- rec_offs_extra_size(offsets)
< srv_page_size);
ut_ad(size + s < srv_page_size);
@@ -1116,7 +1111,7 @@ delete_all:
of the records owned by the supremum record, as it is allowed to be
less than PAGE_DIR_SLOT_MIN_N_OWNED */
- if (page_is_comp(page)) {
+ if (page_is_comp(block->frame)) {
rec_t* rec2 = rec;
ulint count = 0;
@@ -1131,7 +1126,7 @@ delete_all:
n_owned = rec_get_n_owned_new(rec2) - count;
slot_index = page_dir_find_owner_slot(rec2);
ut_ad(slot_index > 0);
- slot = page_dir_get_nth_slot(page, slot_index);
+ slot = page_dir_get_nth_slot(block->frame, slot_index);
} else {
rec_t* rec2 = rec;
ulint count = 0;
@@ -1147,28 +1142,30 @@ delete_all:
n_owned = rec_get_n_owned_old(rec2) - count;
slot_index = page_dir_find_owner_slot(rec2);
ut_ad(slot_index > 0);
- slot = page_dir_get_nth_slot(page, slot_index);
+ slot = page_dir_get_nth_slot(block->frame, slot_index);
}
- page_dir_slot_set_rec(slot, page_get_supremum_rec(page));
+ page_dir_slot_set_rec(slot, page_get_supremum_rec(block->frame));
page_dir_slot_set_n_owned(slot, NULL, n_owned);
- page_dir_set_n_slots(page, NULL, slot_index + 1);
+ page_dir_set_n_slots(block->frame, NULL, slot_index + 1);
/* Remove the record chain segment from the record chain */
- page_rec_set_next(prev_rec, page_get_supremum_rec(page));
+ page_rec_set_next(prev_rec, page_get_supremum_rec(block->frame));
/* Catenate the deleted chain segment to the page free list */
- page_rec_set_next(last_rec, page_header_get_ptr(page, PAGE_FREE));
- page_header_set_ptr(page, NULL, PAGE_FREE, rec);
+ page_rec_set_next(last_rec, page_header_get_ptr(block->frame,
+ PAGE_FREE));
+ page_header_set_ptr(block->frame, NULL, PAGE_FREE, rec);
- page_header_set_field(page, NULL, PAGE_GARBAGE, size
- + page_header_get_field(page, PAGE_GARBAGE));
+ page_header_set_field(block->frame, NULL, PAGE_GARBAGE, size
+ + page_header_get_field(block->frame,
+ PAGE_GARBAGE));
- ut_ad(page_get_n_recs(page) > n_recs);
- page_header_set_field(page, NULL, PAGE_N_RECS,
- (ulint)(page_get_n_recs(page) - n_recs));
+ ut_ad(page_get_n_recs(block->frame) > n_recs);
+ page_header_set_field(block->frame, NULL, PAGE_N_RECS,
+ ulint{page_get_n_recs(block->frame) - n_recs});
}
/*************************************************************//**
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index 41731ed17a0..ebfa02292cd 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -924,12 +924,11 @@ skip_secondaries:
if (dfield_is_ext(&ufield->new_val)) {
trx_rseg_t* rseg;
buf_block_t* block;
- ulint internal_offset;
byte* data_field;
- ibool is_insert;
+ bool is_insert;
ulint rseg_id;
- ulint page_no;
- ulint offset;
+ uint32_t page_no;
+ uint16_t offset;
/* We use the fact that new_val points to
undo_rec and get thus the offset of
@@ -937,7 +936,7 @@ skip_secondaries:
can calculate from node->roll_ptr the file
address of the new_val data */
- internal_offset = ulint(
+ const uint16_t internal_offset = uint16_t(
static_cast<const byte*>
(dfield_get_data(&ufield->new_val))
- undo_rec);
@@ -989,7 +988,7 @@ skip_secondaries:
index,
data_field + dfield_get_len(&ufield->new_val)
- BTR_EXTERN_FIELD_REF_SIZE,
- NULL, NULL, NULL, 0, false, &mtr);
+ NULL, NULL, block, 0, false, &mtr);
mtr.commit();
}
}
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index 341146c0a36..f0e47bec82c 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -208,20 +208,22 @@ func_exit:
/* When rolling back the very first instant ADD COLUMN
operation, reset the root page to the basic state. */
ut_ad(!index->table->is_temporary());
- if (page_t* root = btr_root_get(index, &mtr)) {
- byte* page_type = root + FIL_PAGE_TYPE;
+ if (buf_block_t* root = btr_root_block_get(index, RW_SX_LATCH,
+ &mtr)) {
+ byte* page_type = root->frame + FIL_PAGE_TYPE;
ut_ad(mach_read_from_2(page_type)
== FIL_PAGE_TYPE_INSTANT
|| mach_read_from_2(page_type)
== FIL_PAGE_INDEX);
- mlog_write_ulint(page_type, FIL_PAGE_INDEX,
- MLOG_2BYTES, &mtr);
- byte* instant = PAGE_INSTANT + PAGE_HEADER + root;
- mlog_write_ulint(instant,
- page_ptr_get_direction(instant + 1),
- MLOG_2BYTES, &mtr);
- rec_t* infimum = page_get_infimum_rec(root);
- rec_t* supremum = page_get_supremum_rec(root);
+ mtr.write<2,mtr_t::OPT>(*root, page_type,
+ FIL_PAGE_INDEX);
+ byte* instant = PAGE_INSTANT + PAGE_HEADER
+ + root->frame;
+ mtr.write<2,mtr_t::OPT>(
+ *root, instant,
+ page_ptr_get_direction(instant + 1));
+ rec_t* infimum = page_get_infimum_rec(root->frame);
+ rec_t* supremum = page_get_supremum_rec(root->frame);
static const byte str[8 + 8] = "supremuminfimum";
if (memcmp(infimum, str + 8, 8)
|| memcmp(supremum, str, 8)) {
diff --git a/storage/innobase/row/row0undo.cc b/storage/innobase/row/row0undo.cc
index 18364f16ec5..1580a8e92cd 100644
--- a/storage/innobase/row/row0undo.cc
+++ b/storage/innobase/row/row0undo.cc
@@ -335,37 +335,30 @@ static bool row_undo_rec_get(undo_node_t* node)
mtr_t mtr;
mtr.start();
- page_t* undo_page = trx_undo_page_get_s_latched(
+ buf_block_t* undo_page = trx_undo_page_get_s_latched(
page_id_t(undo->rseg->space->id, undo->top_page_no), &mtr);
- ulint offset = undo->top_offset;
-
- trx_undo_rec_t* prev_rec = trx_undo_get_prev_rec(
- undo_page + offset, undo->hdr_page_no, undo->hdr_offset,
- true, &mtr);
-
- if (prev_rec == NULL) {
- undo->top_undo_no = IB_ID_MAX;
- ut_ad(undo->empty());
- } else {
- page_t* prev_rec_page = page_align(prev_rec);
-
- if (prev_rec_page != undo_page) {
+ uint16_t offset = undo->top_offset;
+ buf_block_t* prev_page = undo_page;
+ if (trx_undo_rec_t* prev_rec = trx_undo_get_prev_rec(
+ prev_page, offset, undo->hdr_page_no, undo->hdr_offset,
+ true, &mtr)) {
+ if (prev_page != undo_page) {
trx->pages_undone++;
}
- undo->top_page_no = page_get_page_no(prev_rec_page);
- undo->top_offset = ulint(prev_rec - prev_rec_page);
+ undo->top_page_no = prev_page->page.id.page_no();
+ undo->top_offset = page_offset(prev_rec);
undo->top_undo_no = trx_undo_rec_get_undo_no(prev_rec);
ut_ad(!undo->empty());
+ } else {
+ undo->top_undo_no = IB_ID_MAX;
+ ut_ad(undo->empty());
}
- {
- const trx_undo_rec_t* undo_rec = undo_page + offset;
- node->undo_rec = trx_undo_rec_copy(undo_rec, node->heap);
- }
-
+ node->undo_rec = trx_undo_rec_copy(undo_page->frame + offset,
+ node->heap);
mtr.commit();
switch (trx_undo_rec_get_type(node->undo_rec)) {
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index 5c71bfe4029..484876c8a9e 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -2755,7 +2755,7 @@ err_exit:
insert fails, then this disown will be undone
when the operation is rolled back. */
btr_cur_disown_inherited_fields(
- btr_cur_get_page_zip(btr_cur),
+ btr_cur_get_block(btr_cur),
rec, index, offsets, node->update,
mtr);
}
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 4c80425e6dd..d6520699f0c 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -1813,9 +1813,9 @@ files_checked:
ut_ad(size == fil_system.sys_space
->size_in_header);
size += sum_of_new_sizes;
- mlog_write_ulint(FSP_HEADER_OFFSET + FSP_SIZE
- + block->frame, size,
- MLOG_4BYTES, &mtr);
+ mtr.write<4>(*block,
+ FSP_HEADER_OFFSET + FSP_SIZE
+ + block->frame, size);
fil_system.sys_space->size_in_header = size;
mtr.commit();
/* Immediately write the log record about
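(Illustrative note, not part of the commit: in the srv0start.cc hunk the tablespace size is now written with mtr.write<4>(*block, ...). Passing the block descriptor along with the raw pointer is what enables the "better consistency checks" mentioned in the commit message, since the callee can assert that the pointer really lies inside that block's page frame. A toy sketch of the check, with invented names:)

#include <cassert>
#include <cstdint>

struct toy_block { uint8_t frame[16384]; };   // one 16 KiB page

// Write a 4-byte big-endian value, asserting the target is inside the page.
inline void checked_write_4(toy_block& block, uint8_t* ptr, uint32_t val) {
  assert(ptr >= block.frame && ptr + 4 <= block.frame + sizeof block.frame);
  ptr[0] = uint8_t(val >> 24);
  ptr[1] = uint8_t(val >> 16);
  ptr[2] = uint8_t(val >> 8);
  ptr[3] = uint8_t(val);
}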
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index 7bfeda51a3b..8247c7c30b1 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -211,15 +211,16 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
|| undo == trx->rsegs.m_redo.old_insert);
trx_rseg_t* rseg = trx->rsegs.m_redo.rseg;
ut_ad(undo->rseg == rseg);
- trx_rsegf_t* rseg_header = trx_rsegf_get(
+ buf_block_t* rseg_header = trx_rsegf_get(
rseg->space, rseg->page_no, mtr);
- page_t* undo_page = trx_undo_set_state_at_finish(
+ buf_block_t* undo_page = trx_undo_set_state_at_finish(
undo, mtr);
- trx_ulogf_t* undo_header = undo_page + undo->hdr_offset;
+ trx_ulogf_t* undo_header = undo_page->frame + undo->hdr_offset;
ut_ad(mach_read_from_2(undo_header + TRX_UNDO_NEEDS_PURGE) <= 1);
- if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG_FORMAT + rseg_header))) {
+ if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT
+ + rseg_header->frame))) {
/* This database must have been upgraded from
before MariaDB 10.3.5. */
trx_rseg_format_upgrade(rseg_header, mtr);
@@ -228,23 +229,27 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
if (undo->state != TRX_UNDO_CACHED) {
/* The undo log segment will not be reused */
ut_a(undo->id < TRX_RSEG_N_SLOTS);
- trx_rsegf_set_nth_undo(rseg_header, undo->id, FIL_NULL, mtr);
+ compile_time_assert(FIL_NULL == 0xffffffff);
+ mlog_memset(TRX_RSEG + TRX_RSEG_UNDO_SLOTS
+ + undo->id * TRX_RSEG_SLOT_SIZE
+ + rseg_header->frame, 4, 0xff, mtr);
MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_USED);
uint32_t hist_size = mach_read_from_4(TRX_RSEG_HISTORY_SIZE
- + rseg_header);
+ + TRX_RSEG
+ + rseg_header->frame);
ut_ad(undo->size == flst_get_len(TRX_UNDO_SEG_HDR
+ TRX_UNDO_PAGE_LIST
- + undo_page));
-
- mlog_write_ulint(
- rseg_header + TRX_RSEG_HISTORY_SIZE,
- hist_size + undo->size, MLOG_4BYTES, mtr);
-
- mlog_write_ull(rseg_header + TRX_RSEG_MAX_TRX_ID,
- trx_sys.get_max_trx_id(), mtr);
+ + undo_page->frame));
+
+ mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_HISTORY_SIZE
+ + rseg_header->frame,
+ hist_size + undo->size);
+ mtr->write<8>(*rseg_header, TRX_RSEG + TRX_RSEG_MAX_TRX_ID
+ + rseg_header->frame,
+ trx_sys.get_max_trx_id());
}
/* After the purge thread has been given permission to exit,
@@ -287,16 +292,17 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
}
/* Add the log as the first in the history list */
- flst_add_first(rseg_header + TRX_RSEG_HISTORY,
- undo_header + TRX_UNDO_HISTORY_NODE, mtr);
+ flst_add_first(rseg_header, TRX_RSEG + TRX_RSEG_HISTORY,
+ undo_page, undo->hdr_offset + TRX_UNDO_HISTORY_NODE,
+ mtr);
- mlog_write_ull(undo_header + TRX_UNDO_TRX_NO, trx->no, mtr);
+ mtr->write<8>(*undo_page, undo_header + TRX_UNDO_TRX_NO, trx->no);
/* This is needed for upgrading old undo log pages from
before MariaDB 10.3.1. */
if (UNIV_UNLIKELY(!mach_read_from_2(undo_header
+ TRX_UNDO_NEEDS_PURGE))) {
- mlog_write_ulint(undo_header + TRX_UNDO_NEEDS_PURGE, 1,
- MLOG_2BYTES, mtr);
+ mtr->write<2>(*undo_page, undo_header + TRX_UNDO_NEEDS_PURGE,
+ 1U);
}
if (rseg->last_page_no == FIL_NULL) {
@@ -320,19 +326,16 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
}
/** Remove undo log header from the history list.
-@param[in,out] rseg_hdr rollback segment header
-@param[in] log_hdr undo log segment header
-@param[in,out] mtr mini transaction. */
-static
-void
-trx_purge_remove_log_hdr(
- trx_rsegf_t* rseg_hdr,
- trx_ulogf_t* log_hdr,
- mtr_t* mtr)
+@param[in,out] rseg rollback segment header page
+@param[in] log undo log segment header page
+@param[in] offset byte offset in the undo log segment header page
+@param[in,out] mtr mini-transaction */
+static void trx_purge_remove_log_hdr(buf_block_t *rseg, buf_block_t* log,
+ uint16_t offset, mtr_t *mtr)
{
- flst_remove(rseg_hdr + TRX_RSEG_HISTORY,
- log_hdr + TRX_UNDO_HISTORY_NODE, mtr);
- trx_sys.rseg_history_len--;
+ flst_remove(rseg, TRX_RSEG + TRX_RSEG_HISTORY,
+ log, offset + TRX_UNDO_HISTORY_NODE, mtr);
+ trx_sys.rseg_history_len--;
}
/** Free an undo log segment, and remove the header from the history list.
@@ -343,14 +346,12 @@ void
trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
{
mtr_t mtr;
- trx_rsegf_t* rseg_hdr;
- page_t* undo_page;
mtr.start();
mutex_enter(&rseg->mutex);
- rseg_hdr = trx_rsegf_get(rseg->space, rseg->page_no, &mtr);
- undo_page = trx_undo_page_get(
+ buf_block_t* rseg_hdr = trx_rsegf_get(rseg->space, rseg->page_no, &mtr);
+ buf_block_t* block = trx_undo_page_get(
page_id_t(rseg->space->id, hdr_addr.page), &mtr);
/* Mark the last undo log totally purged, so that if the
@@ -358,12 +359,12 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
again. The list of pages in the undo log tail gets
inconsistent during the freeing of the segment, and therefore
purge should not try to access them again. */
- mlog_write_ulint(undo_page + hdr_addr.boffset + TRX_UNDO_NEEDS_PURGE,
- 0, MLOG_2BYTES, &mtr);
+ mtr.write<2>(*block, block->frame + hdr_addr.boffset
+ + TRX_UNDO_NEEDS_PURGE, 0U);
while (!fseg_free_step_not_header(
TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
- + undo_page, false, &mtr)) {
+ + block->frame, false, &mtr)) {
mutex_exit(&rseg->mutex);
mtr.commit();
@@ -373,7 +374,7 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
rseg_hdr = trx_rsegf_get(rseg->space, rseg->page_no, &mtr);
- undo_page = trx_undo_page_get(
+ block = trx_undo_page_get(
page_id_t(rseg->space->id, hdr_addr.page), &mtr);
}
@@ -381,15 +382,15 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
stored in the list base node tells us how big it was before we
started the freeing. */
- const ulint seg_size = flst_get_len(
- TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + undo_page);
+ const uint32_t seg_size = flst_get_len(
+ TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + block->frame);
/* We may free the undo log segment header page; it must be freed
within the same mtr as the undo log header is removed from the
history list: otherwise, in case of a database crash, the segment
could become inaccessible garbage in the file space. */
- trx_purge_remove_log_hdr(rseg_hdr, undo_page + hdr_addr.boffset, &mtr);
+ trx_purge_remove_log_hdr(rseg_hdr, block, hdr_addr.boffset, &mtr);
do {
@@ -399,14 +400,12 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
fsp0fsp.cc. */
} while (!fseg_free_step(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
- + undo_page, false, &mtr));
+ + block->frame, false, &mtr));
- const ulint hist_size = mach_read_from_4(rseg_hdr
- + TRX_RSEG_HISTORY_SIZE);
- ut_ad(hist_size >= seg_size);
+ byte* hist = TRX_RSEG + TRX_RSEG_HISTORY_SIZE + rseg_hdr->frame;
+ ut_ad(mach_read_from_4(hist) >= seg_size);
- mlog_write_ulint(rseg_hdr + TRX_RSEG_HISTORY_SIZE,
- hist_size - seg_size, MLOG_4BYTES, &mtr);
+ mtr.write<4>(*rseg_hdr, hist, mach_read_from_4(hist) - seg_size);
ut_ad(rseg->curr_size >= seg_size);
@@ -428,10 +427,6 @@ trx_purge_truncate_rseg_history(
{
fil_addr_t hdr_addr;
fil_addr_t prev_hdr_addr;
- trx_rsegf_t* rseg_hdr;
- page_t* undo_page;
- trx_ulogf_t* log_hdr;
- trx_usegf_t* seg_hdr;
mtr_t mtr;
trx_id_t undo_trx_no;
@@ -439,10 +434,10 @@ trx_purge_truncate_rseg_history(
ut_ad(rseg.is_persistent());
mutex_enter(&rseg.mutex);
- rseg_hdr = trx_rsegf_get(rseg.space, rseg.page_no, &mtr);
+ buf_block_t* rseg_hdr = trx_rsegf_get(rseg.space, rseg.page_no, &mtr);
- hdr_addr = trx_purge_get_log_from_hist(flst_get_last(TRX_RSEG_HISTORY
- + rseg_hdr));
+ hdr_addr = trx_purge_get_log_from_hist(
+ flst_get_last(TRX_RSEG + TRX_RSEG_HISTORY + rseg_hdr->frame));
loop:
if (hdr_addr.page == FIL_NULL) {
func_exit:
@@ -451,12 +446,11 @@ func_exit:
return;
}
- undo_page = trx_undo_page_get(page_id_t(rseg.space->id, hdr_addr.page),
- &mtr);
-
- log_hdr = undo_page + hdr_addr.boffset;
-
- undo_trx_no = mach_read_from_8(log_hdr + TRX_UNDO_TRX_NO);
+ buf_block_t* block = trx_undo_page_get(page_id_t(rseg.space->id,
+ hdr_addr.page),
+ &mtr);
+ undo_trx_no = mach_read_from_8(block->frame + hdr_addr.boffset
+ + TRX_UNDO_TRX_NO);
if (undo_trx_no >= limit.trx_no()) {
if (undo_trx_no == limit.trx_no()) {
@@ -469,12 +463,13 @@ func_exit:
}
prev_hdr_addr = trx_purge_get_log_from_hist(
- flst_get_prev_addr(log_hdr + TRX_UNDO_HISTORY_NODE));
-
- seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
+ flst_get_prev_addr(block->frame + hdr_addr.boffset
+ + TRX_UNDO_HISTORY_NODE));
- if ((mach_read_from_2(seg_hdr + TRX_UNDO_STATE) == TRX_UNDO_TO_PURGE)
- && (mach_read_from_2(log_hdr + TRX_UNDO_NEXT_LOG) == 0)) {
+ if (mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + block->frame)
+ == TRX_UNDO_TO_PURGE
+ && !mach_read_from_2(block->frame + hdr_addr.boffset
+ + TRX_UNDO_NEXT_LOG)) {
/* We can free the whole log segment */
@@ -486,7 +481,8 @@ func_exit:
trx_purge_free_segment(&rseg, hdr_addr);
} else {
/* Remove the log hdr from the rseg history. */
- trx_purge_remove_log_hdr(rseg_hdr, log_hdr, &mtr);
+ trx_purge_remove_log_hdr(rseg_hdr, block, hdr_addr.boffset,
+ &mtr);
mutex_exit(&rseg.mutex);
mtr.commit();
@@ -825,8 +821,6 @@ static void trx_purge_rseg_get_next_history_log(
ulint* n_pages_handled)/*!< in/out: number of UNDO pages
handled */
{
- page_t* undo_page;
- trx_ulogf_t* log_hdr;
fil_addr_t prev_log_addr;
trx_id_t trx_no;
mtr_t mtr;
@@ -841,11 +835,12 @@ static void trx_purge_rseg_get_next_history_log(
mtr.start();
- undo_page = trx_undo_page_get_s_latched(
+ const buf_block_t* undo_page = trx_undo_page_get_s_latched(
page_id_t(purge_sys.rseg->space->id,
purge_sys.rseg->last_page_no), &mtr);
- log_hdr = undo_page + purge_sys.rseg->last_offset;
+ const trx_ulogf_t* log_hdr = undo_page->frame
+ + purge_sys.rseg->last_offset;
/* Increase the purge page count by one for every handled log */
@@ -873,7 +868,7 @@ static void trx_purge_rseg_get_next_history_log(
log_hdr = trx_undo_page_get_s_latched(
page_id_t(purge_sys.rseg->space->id, prev_log_addr.page),
- &mtr)
+ &mtr)->frame
+ prev_log_addr.boffset;
trx_no = mach_read_from_8(log_hdr + TRX_UNDO_TRX_NO);
@@ -908,8 +903,8 @@ static
void
trx_purge_read_undo_rec()
{
- ulint offset;
- ulint page_no;
+ uint16_t offset;
+ uint32_t page_no;
ib_uint64_t undo_no;
purge_sys.hdr_offset = purge_sys.rseg->last_offset;
@@ -918,13 +913,15 @@ trx_purge_read_undo_rec()
if (purge_sys.rseg->needs_purge) {
mtr_t mtr;
mtr.start();
+ buf_block_t* undo_page;
if (trx_undo_rec_t* undo_rec = trx_undo_get_first_rec(
- purge_sys.rseg->space, purge_sys.hdr_page_no,
- purge_sys.hdr_offset, RW_S_LATCH, &mtr)) {
+ *purge_sys.rseg->space, purge_sys.hdr_page_no,
+ purge_sys.hdr_offset, RW_S_LATCH,
+ undo_page, &mtr)) {
offset = page_offset(undo_rec);
undo_no = trx_undo_rec_get_undo_no(undo_rec);
- page_no = page_get_page_no(page_align(undo_rec));
+ page_no = undo_page->page.id.page_no();
} else {
offset = 0;
undo_no = 0;
@@ -974,22 +971,14 @@ trx_purge_get_next_rec(
handled */
mem_heap_t* heap) /*!< in: memory heap where copied */
{
- trx_undo_rec_t* rec;
- trx_undo_rec_t* rec_copy;
- trx_undo_rec_t* rec2;
- page_t* undo_page;
- page_t* page;
- ulint offset;
- ulint page_no;
- ulint space;
mtr_t mtr;
ut_ad(purge_sys.next_stored);
ut_ad(purge_sys.tail.trx_no() < purge_sys.view.low_limit_no());
- space = purge_sys.rseg->space->id;
- page_no = purge_sys.page_no;
- offset = purge_sys.offset;
+ const ulint space = purge_sys.rseg->space->id;
+ const uint32_t page_no = purge_sys.page_no;
+ const uint16_t offset = purge_sys.offset;
if (offset == 0) {
/* It is the dummy undo log record, which means that there is
@@ -1006,16 +995,16 @@ trx_purge_get_next_rec(
mtr_start(&mtr);
- undo_page = trx_undo_page_get_s_latched(page_id_t(space, page_no),
- &mtr);
+ buf_block_t* undo_page = trx_undo_page_get_s_latched(
+ page_id_t(space, page_no), &mtr);
+ buf_block_t* rec2_page = undo_page;
- rec = undo_page + offset;
-
- rec2 = trx_undo_page_get_next_rec(rec, purge_sys.hdr_page_no,
- purge_sys.hdr_offset);
+ const trx_undo_rec_t* rec2 = trx_undo_page_get_next_rec(
+ undo_page, offset, purge_sys.hdr_page_no, purge_sys.hdr_offset);
if (rec2 == NULL) {
- rec2 = trx_undo_get_next_rec(rec, purge_sys.hdr_page_no,
+ rec2 = trx_undo_get_next_rec(rec2_page, offset,
+ purge_sys.hdr_page_no,
purge_sys.hdr_offset, &mtr);
}
@@ -1032,22 +1021,19 @@ trx_purge_get_next_rec(
undo_page = trx_undo_page_get_s_latched(
page_id_t(space, page_no), &mtr);
-
- rec = undo_page + offset;
} else {
- page = page_align(rec2);
-
- purge_sys.offset = ulint(rec2 - page);
- purge_sys.page_no = page_get_page_no(page);
+ purge_sys.offset = page_offset(rec2);
+ purge_sys.page_no = rec2_page->page.id.page_no();
purge_sys.tail.undo_no = trx_undo_rec_get_undo_no(rec2);
- if (undo_page != page) {
+ if (undo_page != rec2_page) {
/* We advance to a new page of the undo log: */
(*n_pages_handled)++;
}
}
- rec_copy = trx_undo_rec_copy(rec, heap);
+ trx_undo_rec_t* rec_copy = trx_undo_rec_copy(undo_page->frame + offset,
+ heap);
mtr_commit(&mtr);
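(Illustrative note, not part of the commit: throughout the trx0purge.cc hunks above, header fields are no longer addressed through a precomputed pointer such as rseg_hdr + TRX_RSEG_HISTORY_SIZE but as an explicit offset from block->frame, and updates such as the history size become a read-modify-write issued through mtr.write<4>(). A toy read-modify-write of a big-endian 32-bit page field, with invented names:)

#include <cstdint>

// Read a big-endian 32-bit field stored at ptr.
inline uint32_t toy_read_4(const uint8_t* ptr) {
  return (uint32_t(ptr[0]) << 24) | (uint32_t(ptr[1]) << 16)
       | (uint32_t(ptr[2]) << 8)  |  uint32_t(ptr[3]);
}

// Shrink a size field in place, e.g. a history length after freeing
// seg_size pages; the caller would log the new value via its mini-transaction.
inline uint32_t toy_shrink_field(uint8_t* field, uint32_t seg_size) {
  uint32_t old_size = toy_read_4(field);
  uint32_t new_size = old_size - seg_size;   // caller asserts old_size >= seg_size
  field[0] = uint8_t(new_size >> 24);
  field[1] = uint8_t(new_size >> 16);
  field[2] = uint8_t(new_size >> 8);
  field[3] = uint8_t(new_size);
  return new_size;
}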
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index 2db1fee062c..04ee4428a91 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -162,7 +162,7 @@ that was written to ptr. Update the first free value by the number of bytes
written for this undo record.
@return offset of the inserted entry on the page if succeeded, 0 if fail */
static
-ulint
+uint16_t
trx_undo_page_set_next_prev_and_add(
/*================================*/
buf_block_t* undo_block, /*!< in/out: undo log page */
@@ -170,30 +170,22 @@ trx_undo_page_set_next_prev_and_add(
written on this undo page. */
mtr_t* mtr) /*!< in: mtr */
{
- ulint first_free; /*!< offset within undo_page */
- ulint end_of_rec; /*!< offset within undo_page */
- byte* ptr_to_first_free;
- /* pointer within undo_page
- that points to the next free
- offset value within undo_page.*/
-
- ut_ad(ptr > undo_block->frame);
- ut_ad(ptr < undo_block->frame + srv_page_size);
+ ut_ad(page_align(ptr) == undo_block->frame);
if (UNIV_UNLIKELY(trx_undo_left(undo_block, ptr) < 2)) {
return(0);
}
- ptr_to_first_free = TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ byte* ptr_to_first_free = TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ undo_block->frame;
- first_free = mach_read_from_2(ptr_to_first_free);
+ uint16_t first_free = mach_read_from_2(ptr_to_first_free);
/* Write offset of the previous undo log record */
mach_write_to_2(ptr, first_free);
ptr += 2;
- end_of_rec = ulint(ptr - undo_block->frame);
+ uint16_t end_of_rec = uint16_t(ptr - undo_block->frame);
/* Write offset of the next undo log record */
mach_write_to_2(undo_block->frame + first_free, end_of_rec);
@@ -457,7 +449,7 @@ trx_undo_report_insert_virtual(
Reports in the undo log of an insert of a clustered index record.
@return offset of the inserted entry on the page if succeed, 0 if fail */
static
-ulint
+uint16_t
trx_undo_page_report_insert(
/*========================*/
buf_block_t* undo_block, /*!< in: undo log page */
@@ -467,10 +459,6 @@ trx_undo_page_report_insert(
inserted to the clustered index */
mtr_t* mtr) /*!< in: mtr */
{
- ulint first_free;
- byte* ptr;
- ulint i;
-
ut_ad(dict_index_is_clust(index));
/* MariaDB 10.3.1+ in trx_undo_page_init() always initializes
TRX_UNDO_PAGE_TYPE as 0, but previous versions wrote
@@ -479,9 +467,10 @@ trx_undo_page_report_insert(
ut_ad(mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE
+ undo_block->frame) <= 2);
- first_free = mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
- + undo_block->frame);
- ptr = undo_block->frame + first_free;
+ uint16_t first_free = mach_read_from_2(TRX_UNDO_PAGE_HDR
+ + TRX_UNDO_PAGE_FREE
+ + undo_block->frame);
+ byte* ptr = undo_block->frame + first_free;
ut_ad(first_free <= srv_page_size);
@@ -509,7 +498,7 @@ trx_undo_page_report_insert(
goto done;
}
- for (i = 0; i < dict_index_get_n_unique(index); i++) {
+ for (unsigned i = 0; i < dict_index_get_n_unique(index); i++) {
const dfield_t* field = dtuple_get_nth_field(clust_entry, i);
ulint flen = dfield_get_len(field);
@@ -572,12 +561,14 @@ trx_undo_rec_get_pars(
*updated_extern = !!(type_cmpl & TRX_UNDO_UPD_EXTERN);
type_cmpl &= ~TRX_UNDO_UPD_EXTERN;
-
*type = type_cmpl & (TRX_UNDO_CMPL_INFO_MULT - 1);
+ ut_ad(*type >= TRX_UNDO_RENAME_TABLE);
+ ut_ad(*type <= TRX_UNDO_DEL_MARK_REC);
*cmpl_info = type_cmpl / TRX_UNDO_CMPL_INFO_MULT;
*undo_no = mach_read_next_much_compressed(&ptr);
*table_id = mach_read_next_much_compressed(&ptr);
+ ut_ad(*table_id);
return(const_cast<byte*>(ptr));
}
@@ -856,7 +847,7 @@ record.
@return byte offset of the inserted undo log entry on the page if
succeed, 0 if fail */
static
-ulint
+uint16_t
trx_undo_page_report_modify(
/*========================*/
buf_block_t* undo_block, /*!< in: undo log page */
@@ -875,7 +866,6 @@ trx_undo_page_report_modify(
virtual column info */
mtr_t* mtr) /*!< in: mtr */
{
- ulint first_free;
byte* ptr;
ut_ad(index->is_primary());
@@ -887,8 +877,9 @@ trx_undo_page_report_modify(
ut_ad(mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE
+ undo_block->frame) <= 2);
- first_free = mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
- + undo_block->frame);
+ uint16_t first_free = mach_read_from_2(TRX_UNDO_PAGE_HDR
+ + TRX_UNDO_PAGE_FREE
+ + undo_block->frame);
ptr = undo_block->frame + first_free;
ut_ad(first_free <= srv_page_size);
@@ -1953,13 +1944,13 @@ trx_undo_erase_page_end(page_t* undo_page)
@return byte offset of the undo log record
@retval 0 in case of failure */
static
-ulint
+uint16_t
trx_undo_page_report_rename(trx_t* trx, const dict_table_t* table,
buf_block_t* block, mtr_t* mtr)
{
byte* ptr_first_free = TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ block->frame;
- ulint first_free = mach_read_from_2(ptr_first_free);
+ uint16_t first_free = mach_read_from_2(ptr_first_free);
ut_ad(first_free >= TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE);
ut_ad(first_free <= srv_page_size);
byte* start = block->frame + first_free;
@@ -1985,7 +1976,7 @@ trx_undo_page_report_rename(trx_t* trx, const dict_table_t* table,
ptr += len;
mach_write_to_2(ptr, first_free);
ptr += 2;
- ulint offset = page_offset(ptr);
+ uint16_t offset = page_offset(ptr);
mach_write_to_2(start, offset);
mach_write_to_2(ptr_first_free, offset);
@@ -2014,7 +2005,7 @@ dberr_t trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
ut_ad(loop_count++ < 2);
ut_ad(undo->last_page_no == block->page.id.page_no());
- if (ulint offset = trx_undo_page_report_rename(
+ if (uint16_t offset = trx_undo_page_report_rename(
trx, table, block, &mtr)) {
undo->withdraw_clock = buf_withdraw_clock;
undo->top_page_no = undo->last_page_no;
@@ -2118,7 +2109,7 @@ trx_undo_report_row_operation(
ut_ad(undo != NULL);
do {
- ulint offset = !rec
+ uint16_t offset = !rec
? trx_undo_page_report_insert(
undo_block, trx, index, clust_entry, &mtr)
: trx_undo_page_report_modify(
@@ -2240,11 +2231,10 @@ trx_undo_get_undo_rec_low(
{
trx_undo_rec_t* undo_rec;
ulint rseg_id;
- ulint page_no;
- ulint offset;
- const page_t* undo_page;
+ uint32_t page_no;
+ uint16_t offset;
trx_rseg_t* rseg;
- ibool is_insert;
+ bool is_insert;
mtr_t mtr;
trx_undo_decode_roll_ptr(roll_ptr, &is_insert, &rseg_id, &page_no,
@@ -2254,14 +2244,14 @@ trx_undo_get_undo_rec_low(
rseg = trx_sys.rseg_array[rseg_id];
ut_ad(rseg->is_persistent());
- mtr_start(&mtr);
+ mtr.start();
- undo_page = trx_undo_page_get_s_latched(
+ buf_block_t* undo_page = trx_undo_page_get_s_latched(
page_id_t(rseg->space->id, page_no), &mtr);
- undo_rec = trx_undo_rec_copy(undo_page + offset, heap);
+ undo_rec = trx_undo_rec_copy(undo_page->frame + offset, heap);
- mtr_commit(&mtr);
+ mtr.commit();
return(undo_rec);
}
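(Illustrative note, not part of the commit: the trx0rec.cc hunks narrow undo record offsets from ulint to uint16_t. That is safe because a byte offset within a page always fits in 16 bits, InnoDB pages being at most 64 KiB. A toy version of the page_offset() idea, with invented names:)

#include <cassert>
#include <cstdint>

constexpr std::size_t TOY_PAGE_SIZE = 16384;   // assume a 16 KiB page

// Byte offset of ptr within its page, assuming frame is the page start.
inline uint16_t toy_page_offset(const uint8_t* frame, const uint8_t* ptr) {
  assert(ptr >= frame && ptr < frame + TOY_PAGE_SIZE);
  return uint16_t(ptr - frame);
}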
diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc
index c33b4fda6ae..c8727a6d5bf 100644
--- a/storage/innobase/trx/trx0rseg.cc
+++ b/storage/innobase/trx/trx0rseg.cc
@@ -49,7 +49,7 @@ static unsigned char wsrep_uuid[16];
@param[in,out] mtr mini transaction */
static void
trx_rseg_write_wsrep_checkpoint(
- trx_rsegf_t* rseg_header,
+ buf_block_t* rseg_header,
const XID* xid,
mtr_t* mtr)
{
@@ -57,25 +57,27 @@ trx_rseg_write_wsrep_checkpoint(
DBUG_ASSERT(xid->bqual_length >= 0);
DBUG_ASSERT(xid->gtrid_length + xid->bqual_length < XIDDATASIZE);
- mlog_write_ulint(TRX_RSEG_WSREP_XID_FORMAT + rseg_header,
- uint32_t(xid->formatID),
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_WSREP_XID_FORMAT
+ + rseg_header->frame,
+ uint32_t(xid->formatID));
- mlog_write_ulint(TRX_RSEG_WSREP_XID_GTRID_LEN + rseg_header,
- uint32_t(xid->gtrid_length),
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_WSREP_XID_GTRID_LEN
+ + rseg_header->frame,
+ uint32_t(xid->gtrid_length));
- mlog_write_ulint(TRX_RSEG_WSREP_XID_BQUAL_LEN + rseg_header,
- uint32_t(xid->bqual_length),
- MLOG_4BYTES, mtr);
+ mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_WSREP_XID_BQUAL_LEN
+ + rseg_header->frame,
+ uint32_t(xid->bqual_length));
const ulint xid_length = static_cast<ulint>(xid->gtrid_length
+ xid->bqual_length);
- mlog_write_string(TRX_RSEG_WSREP_XID_DATA + rseg_header,
+ mlog_write_string(TRX_RSEG + TRX_RSEG_WSREP_XID_DATA
+ + rseg_header->frame,
reinterpret_cast<const byte*>(xid->data),
xid_length, mtr);
if (UNIV_LIKELY(xid_length < XIDDATASIZE)) {
- mlog_memset(TRX_RSEG_WSREP_XID_DATA + rseg_header + xid_length,
+ mlog_memset(TRX_RSEG + TRX_RSEG_WSREP_XID_DATA
+ + rseg_header->frame + xid_length,
XIDDATASIZE - xid_length, 0, mtr);
}
}
@@ -86,7 +88,7 @@ trx_rseg_write_wsrep_checkpoint(
@param[in,out] mtr mini-transaction */
void
trx_rseg_update_wsrep_checkpoint(
- trx_rsegf_t* rseg_header,
+ buf_block_t* rseg_header,
const XID* xid,
mtr_t* mtr)
{
@@ -109,16 +111,13 @@ trx_rseg_update_wsrep_checkpoint(
}
/** Clear the WSREP XID information from rollback segment header.
-@param[in,out] rseg_header Rollback segment header
-@param[in,out] mtr mini-transaction */
-static void
-trx_rseg_clear_wsrep_checkpoint(
- trx_rsegf_t* rseg_header,
- mtr_t* mtr)
+@param[in,out] block rollback segment header
+@param[in,out] mtr mini-transaction */
+static void trx_rseg_clear_wsrep_checkpoint(buf_block_t *block, mtr_t *mtr)
{
- mlog_memset(rseg_header + TRX_RSEG_WSREP_XID_INFO,
- TRX_RSEG_WSREP_XID_DATA + XIDDATASIZE
- - TRX_RSEG_WSREP_XID_INFO, 0, mtr);
+ mlog_memset(block, TRX_RSEG + TRX_RSEG_WSREP_XID_INFO,
+ TRX_RSEG_WSREP_XID_DATA + XIDDATASIZE - TRX_RSEG_WSREP_XID_INFO,
+ 0, mtr);
}
static void
@@ -133,9 +132,10 @@ trx_rseg_update_wsrep_checkpoint(const XID* xid, mtr_t* mtr)
sizeof wsrep_uuid);
const trx_rseg_t* rseg = trx_sys.rseg_array[0];
- trx_rsegf_t* rseg_header = trx_rsegf_get(rseg->space, rseg->page_no,
+ buf_block_t* rseg_header = trx_rsegf_get(rseg->space, rseg->page_no,
mtr);
- if (UNIV_UNLIKELY(mach_read_from_4(rseg_header + TRX_RSEG_FORMAT))) {
+ if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT
+ + rseg_header->frame))) {
trx_rseg_format_upgrade(rseg_header, mtr);
}
@@ -178,25 +178,26 @@ void trx_rseg_update_wsrep_checkpoint(const XID* xid)
@param[out] xid Transaction XID
@return whether the WSREP XID was present */
static
-bool trx_rseg_read_wsrep_checkpoint(const trx_rsegf_t* rseg_header, XID& xid)
+bool trx_rseg_read_wsrep_checkpoint(const buf_block_t *rseg_header, XID &xid)
{
int formatID = static_cast<int>(
- mach_read_from_4(
- TRX_RSEG_WSREP_XID_FORMAT + rseg_header));
+ mach_read_from_4(TRX_RSEG + TRX_RSEG_WSREP_XID_FORMAT
+ + rseg_header->frame));
if (formatID == 0) {
return false;
}
xid.formatID = formatID;
xid.gtrid_length = static_cast<int>(
- mach_read_from_4(
- TRX_RSEG_WSREP_XID_GTRID_LEN + rseg_header));
+ mach_read_from_4(TRX_RSEG + TRX_RSEG_WSREP_XID_GTRID_LEN
+ + rseg_header->frame));
xid.bqual_length = static_cast<int>(
- mach_read_from_4(
- TRX_RSEG_WSREP_XID_BQUAL_LEN + rseg_header));
+ mach_read_from_4(TRX_RSEG + TRX_RSEG_WSREP_XID_BQUAL_LEN
+ + rseg_header->frame));
- memcpy(xid.data, TRX_RSEG_WSREP_XID_DATA + rseg_header, XIDDATASIZE);
+ memcpy(xid.data, TRX_RSEG + TRX_RSEG_WSREP_XID_DATA
+ + rseg_header->frame, XIDDATASIZE);
return true;
}
@@ -252,10 +253,11 @@ bool trx_rseg_read_wsrep_checkpoint(XID& xid)
continue;
}
- const trx_rsegf_t* rseg_header = trx_rsegf_get_new(
+ const buf_block_t* rseg_header = trx_rsegf_get_new(
trx_sysf_rseg_get_space(sys, rseg_id), page_no, &mtr);
- if (mach_read_from_4(rseg_header + TRX_RSEG_FORMAT)) {
+ if (mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT
+ + rseg_header->frame)) {
continue;
}
@@ -279,17 +281,15 @@ bool trx_rseg_read_wsrep_checkpoint(XID& xid)
/** Upgrade a rollback segment header page to MariaDB 10.3 format.
@param[in,out] rseg_header rollback segment header page
@param[in,out] mtr mini-transaction */
-void trx_rseg_format_upgrade(trx_rsegf_t* rseg_header, mtr_t* mtr)
+void trx_rseg_format_upgrade(buf_block_t *rseg_header, mtr_t *mtr)
{
- ut_ad(page_offset(rseg_header) == TRX_RSEG);
- byte* rseg_format = TRX_RSEG_FORMAT + rseg_header;
- mlog_write_ulint(rseg_format, 0, MLOG_4BYTES, mtr);
- /* Clear also possible garbage at the end of the page. Old
- InnoDB versions did not initialize unused parts of pages. */
- mlog_memset(TRX_RSEG_MAX_TRX_ID + 8 + rseg_header,
- srv_page_size
- - (FIL_PAGE_DATA_END
- + TRX_RSEG + TRX_RSEG_MAX_TRX_ID + 8), 0, mtr);
+ mlog_memset(rseg_header, TRX_RSEG + TRX_RSEG_FORMAT, 4, 0, mtr);
+ /* Clear also possible garbage at the end of the page. Old
+ InnoDB versions did not initialize unused parts of pages. */
+ mlog_memset(rseg_header, TRX_RSEG + TRX_RSEG_MAX_TRX_ID + 8,
+ srv_page_size
+ - (FIL_PAGE_DATA_END + TRX_RSEG + TRX_RSEG_MAX_TRX_ID + 8),
+ 0, mtr);
}
/** Create a rollback segment header.
@@ -337,16 +337,16 @@ trx_rseg_header_create(
/* Add the rollback segment info to the free slot in
the trx system header */
- mlog_write_ulint(TRX_SYS + TRX_SYS_RSEGS
- + TRX_SYS_RSEG_SPACE
- + rseg_id * TRX_SYS_RSEG_SLOT_SIZE
- + sys_header->frame,
- space->id, MLOG_4BYTES, mtr);
- mlog_write_ulint(TRX_SYS + TRX_SYS_RSEGS
- + TRX_SYS_RSEG_PAGE_NO
- + rseg_id * TRX_SYS_RSEG_SLOT_SIZE
- + sys_header->frame,
- block->page.id.page_no(), MLOG_4BYTES, mtr);
+ mtr->write<4,mtr_t::OPT>(
+ *sys_header,
+ TRX_SYS + TRX_SYS_RSEGS + TRX_SYS_RSEG_SPACE
+ + rseg_id * TRX_SYS_RSEG_SLOT_SIZE
+ + sys_header->frame, space->id);
+ mtr->write<4,mtr_t::OPT>(
+ *sys_header,
+ TRX_SYS + TRX_SYS_RSEGS + TRX_SYS_RSEG_PAGE_NO
+ + rseg_id * TRX_SYS_RSEG_SLOT_SIZE
+ + sys_header->frame, block->page.id.page_no());
}
return block;
@@ -410,29 +410,28 @@ trx_rseg_mem_create(ulint id, fil_space_t* space, ulint page_no)
}
/** Read the undo log lists.
-@param[in,out] rseg rollback segment
-@param[in,out] max_trx_id maximum observed transaction identifier
-@param[in] rseg_header rollback segment header
+@param[in,out] rseg rollback segment
+@param[in,out] max_trx_id maximum observed transaction identifier
+@param[in] rseg_header rollback segment header
@return the combined size of undo log segments in pages */
-static
-ulint
-trx_undo_lists_init(trx_rseg_t* rseg, trx_id_t& max_trx_id,
- const trx_rsegf_t* rseg_header)
+static ulint trx_undo_lists_init(trx_rseg_t *rseg, trx_id_t &max_trx_id,
+ const buf_block_t *rseg_header)
{
- ut_ad(srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN);
+ ut_ad(srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN);
- ulint size = 0;
+ ulint size= 0;
- for (ulint i = 0; i < TRX_RSEG_N_SLOTS; i++) {
- ulint page_no = trx_rsegf_get_nth_undo(rseg_header, i);
- if (page_no != FIL_NULL) {
- size += trx_undo_mem_create_at_db_start(
- rseg, i, page_no, max_trx_id);
- MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);
- }
- }
+ for (ulint i= 0; i < TRX_RSEG_N_SLOTS; i++)
+ {
+ uint32_t page_no= trx_rsegf_get_nth_undo(rseg_header, i);
+ if (page_no != FIL_NULL)
+ {
+ size+= trx_undo_mem_create_at_db_start(rseg, i, page_no, max_trx_id);
+ MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);
+ }
+ }
- return(size);
+ return size;
}
/** Restore the state of a persistent rollback segment.
@@ -443,20 +442,20 @@ static
void
trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr)
{
- trx_rsegf_t* rseg_header = trx_rsegf_get_new(
+ buf_block_t* rseg_hdr = trx_rsegf_get_new(
rseg->space->id, rseg->page_no, mtr);
- if (mach_read_from_4(rseg_header + TRX_RSEG_FORMAT) == 0) {
- trx_id_t id = mach_read_from_8(rseg_header
- + TRX_RSEG_MAX_TRX_ID);
+ if (!mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT + rseg_hdr->frame)) {
+ trx_id_t id = mach_read_from_8(TRX_RSEG + TRX_RSEG_MAX_TRX_ID
+ + rseg_hdr->frame);
if (id > max_trx_id) {
max_trx_id = id;
}
- if (rseg_header[TRX_RSEG_BINLOG_NAME]) {
- const char* binlog_name = reinterpret_cast<const char*>
- (rseg_header) + TRX_RSEG_BINLOG_NAME;
+ const char* binlog_name = TRX_RSEG + TRX_RSEG_BINLOG_NAME
+ + reinterpret_cast<const char*>(rseg_hdr->frame);
+ if (*binlog_name) {
compile_time_assert(TRX_RSEG_BINLOG_NAME_LEN == sizeof
trx_sys.recovered_binlog_filename);
@@ -468,7 +467,8 @@ trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr)
if (cmp >= 0) {
uint64_t binlog_offset = mach_read_from_8(
- rseg_header + TRX_RSEG_BINLOG_OFFSET);
+ TRX_RSEG + TRX_RSEG_BINLOG_OFFSET
+ + rseg_hdr->frame);
if (cmp) {
memcpy(trx_sys.
recovered_binlog_filename,
@@ -485,7 +485,7 @@ trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr)
#ifdef WITH_WSREP
trx_rseg_read_wsrep_checkpoint(
- rseg_header, trx_sys.recovered_wsrep_xid);
+ rseg_hdr, trx_sys.recovered_wsrep_xid);
#endif
}
}
@@ -499,32 +499,37 @@ trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr)
/* Initialize the undo log lists according to the rseg header */
- rseg->curr_size = mach_read_from_4(rseg_header + TRX_RSEG_HISTORY_SIZE)
- + 1 + trx_undo_lists_init(rseg, max_trx_id, rseg_header);
+ rseg->curr_size = mach_read_from_4(TRX_RSEG + TRX_RSEG_HISTORY_SIZE
+ + rseg_hdr->frame)
+ + 1 + trx_undo_lists_init(rseg, max_trx_id, rseg_hdr);
- if (auto len = flst_get_len(rseg_header + TRX_RSEG_HISTORY)) {
+ if (auto len = flst_get_len(TRX_RSEG + TRX_RSEG_HISTORY
+ + rseg_hdr->frame)) {
trx_sys.rseg_history_len += len;
fil_addr_t node_addr = trx_purge_get_log_from_hist(
- flst_get_last(rseg_header + TRX_RSEG_HISTORY));
+ flst_get_last(TRX_RSEG + TRX_RSEG_HISTORY
+ + rseg_hdr->frame));
rseg->last_page_no = node_addr.page;
rseg->last_offset = node_addr.boffset;
- const trx_ulogf_t* undo_log_hdr = trx_undo_page_get(
- page_id_t(rseg->space->id, node_addr.page), mtr)
- + node_addr.boffset;
+ const buf_block_t* block = trx_undo_page_get(
+ page_id_t(rseg->space->id, node_addr.page), mtr);
- trx_id_t id = mach_read_from_8(undo_log_hdr + TRX_UNDO_TRX_ID);
+ trx_id_t id = mach_read_from_8(block->frame + node_addr.boffset
+ + TRX_UNDO_TRX_ID);
if (id > max_trx_id) {
max_trx_id = id;
}
- id = mach_read_from_8(undo_log_hdr + TRX_UNDO_TRX_NO);
+ id = mach_read_from_8(block->frame + node_addr.boffset
+ + TRX_UNDO_TRX_NO);
if (id > max_trx_id) {
max_trx_id = id;
}
- unsigned purge = mach_read_from_2(
- undo_log_hdr + TRX_UNDO_NEEDS_PURGE);
+ unsigned purge = mach_read_from_2(block->frame
+ + node_addr.boffset
+ + TRX_UNDO_NEEDS_PURGE);
ut_ad(purge <= 1);
rseg->set_last_trx_no(id, purge != 0);
rseg->needs_purge = purge != 0;
@@ -638,8 +643,8 @@ trx_rseg_array_init()
}
/* Finally, clear WSREP XID in TRX_SYS page. */
- const buf_block_t* sys = trx_sysf_get(&mtr);
- mlog_memset(TRX_SYS + TRX_SYS_WSREP_XID_INFO + sys->frame,
+ mlog_memset(trx_sysf_get(&mtr),
+ TRX_SYS + TRX_SYS_WSREP_XID_INFO,
TRX_SYS_WSREP_XID_LEN, 0, &mtr);
mtr.commit();
}
@@ -765,8 +770,8 @@ up to which replication has proceeded.
@param[in,out] rseg_header rollback segment header
@param[in] trx committing transaction
@param[in,out] mtr mini-transaction */
-void
-trx_rseg_update_binlog_offset(byte* rseg_header, const trx_t* trx, mtr_t* mtr)
+void trx_rseg_update_binlog_offset(buf_block_t *rseg_header, const trx_t *trx,
+ mtr_t *mtr)
{
DBUG_LOG("trx", "trx_mysql_binlog_offset: " << trx->mysql_log_offset);
@@ -778,9 +783,11 @@ trx_rseg_update_binlog_offset(byte* rseg_header, const trx_t* trx, mtr_t* mtr)
return;
}
- mlog_write_ull(rseg_header + TRX_RSEG_BINLOG_OFFSET,
- trx->mysql_log_offset, mtr);
- byte* p = rseg_header + TRX_RSEG_BINLOG_NAME;
+ mtr->write<8,mtr_t::OPT>(*rseg_header,
+ TRX_RSEG + TRX_RSEG_BINLOG_OFFSET
+ + rseg_header->frame,
+ trx->mysql_log_offset);
+ byte* p = TRX_RSEG + TRX_RSEG_BINLOG_NAME + rseg_header->frame;
const byte* binlog_name = reinterpret_cast<const byte*>
(trx->mysql_log_file_name);
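(Illustrative note, not part of the commit: the trx0rseg.cc hunks switch mlog_memset() and the binlog/WSREP field writes to the (block, offset) form, again keeping the page being modified explicit at each call site. A toy memset that records a single compact (offset, length, byte) log entry instead of logging every byte, with invented names:)

#include <cstdint>
#include <cstring>
#include <tuple>
#include <vector>

struct toy_block { uint8_t frame[16384]; };

struct toy_mtr {
  // Pretend redo log entries: (offset, length, fill byte).
  std::vector<std::tuple<uint16_t, uint16_t, uint8_t>> log;

  // Fill len bytes at the given page offset and log it as one record.
  void memset_log(toy_block& block, uint16_t offset, uint16_t len, uint8_t val) {
    std::memset(block.frame + offset, val, len);
    log.emplace_back(offset, len, val);
  }
};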
diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc
index 4c0ca852aef..c156983096f 100644
--- a/storage/innobase/trx/trx0sys.cc
+++ b/storage/innobase/trx/trx0sys.cc
@@ -147,8 +147,6 @@ trx_sysf_create(
{
ulint slot_no;
buf_block_t* block;
- page_t* page;
- byte* ptr;
ut_ad(mtr);
@@ -167,30 +165,28 @@ trx_sysf_create(
ut_a(block->page.id.page_no() == TRX_SYS_PAGE_NO);
- page = buf_block_get_frame(block);
+ mtr->write<2>(*block, FIL_PAGE_TYPE + block->frame,
+ FIL_PAGE_TYPE_TRX_SYS);
- mlog_write_ulint(page + FIL_PAGE_TYPE, FIL_PAGE_TYPE_TRX_SYS,
- MLOG_2BYTES, mtr);
-
- /* Reset the doublewrite buffer magic number to zero so that we
- know that the doublewrite buffer has not yet been created (this
- suppresses a Valgrind warning) */
-
- mlog_write_ulint(page + TRX_SYS_DOUBLEWRITE
- + TRX_SYS_DOUBLEWRITE_MAGIC, 0, MLOG_4BYTES, mtr);
+ ut_ad(!mach_read_from_4(block->frame
+ + TRX_SYS_DOUBLEWRITE
+ + TRX_SYS_DOUBLEWRITE_MAGIC));
/* Reset the rollback segment slots. Old versions of InnoDB
(before MySQL 5.5) define TRX_SYS_N_RSEGS as 256 and expect
that the whole array is initialized. */
- ptr = TRX_SYS + TRX_SYS_RSEGS + page;
compile_time_assert(256 >= TRX_SYS_N_RSEGS);
- memset(ptr, 0xff, 256 * TRX_SYS_RSEG_SLOT_SIZE);
- ptr += 256 * TRX_SYS_RSEG_SLOT_SIZE;
- ut_a(ptr <= page + (srv_page_size - FIL_PAGE_DATA_END));
-
+ compile_time_assert(TRX_SYS + TRX_SYS_RSEGS
+ + 256 * TRX_SYS_RSEG_SLOT_SIZE
+ <= UNIV_PAGE_SIZE_MIN - FIL_PAGE_DATA_END);
+ mlog_memset(block, TRX_SYS + TRX_SYS_RSEGS,
+ 256 * TRX_SYS_RSEG_SLOT_SIZE, 0xff, mtr);
/* Initialize all of the page. This part used to be uninitialized. */
- mlog_memset(block, ptr - page,
- srv_page_size - FIL_PAGE_DATA_END + size_t(page - ptr),
+ mlog_memset(block, TRX_SYS + TRX_SYS_RSEGS
+ + 256 * TRX_SYS_RSEG_SLOT_SIZE,
+ srv_page_size
+ - (FIL_PAGE_DATA_END + TRX_SYS + TRX_SYS_RSEGS
+ + 256 * TRX_SYS_RSEG_SLOT_SIZE),
0, mtr);
/* Create the first rollback segment in the SYSTEM tablespace */
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index bf26b5b20ef..0f4df89e068 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -565,8 +565,6 @@ trx_resurrect_table_locks(
const trx_undo_t* undo) /*!< in: undo log */
{
mtr_t mtr;
- page_t* undo_page;
- trx_undo_rec_t* undo_rec;
table_id_set tables;
ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE) ||
@@ -581,11 +579,11 @@ trx_resurrect_table_locks(
/* trx_rseg_mem_create() may have acquired an X-latch on this
page, so we cannot acquire an S-latch. */
- undo_page = trx_undo_page_get(
+ buf_block_t* block = trx_undo_page_get(
page_id_t(trx->rsegs.m_redo.rseg->space->id,
undo->top_page_no), &mtr);
-
- undo_rec = undo_page + undo->top_offset;
+ buf_block_t* undo_block = block;
+ trx_undo_rec_t* undo_rec = block->frame + undo->top_offset;
do {
ulint type;
@@ -594,11 +592,9 @@ trx_resurrect_table_locks(
ulint cmpl_info;
bool updated_extern;
- page_t* undo_rec_page = page_align(undo_rec);
-
- if (undo_rec_page != undo_page) {
- mtr.release_page(undo_page, MTR_MEMO_PAGE_X_FIX);
- undo_page = undo_rec_page;
+ if (undo_block != block) {
+ mtr.memo_release(undo_block, MTR_MEMO_PAGE_X_FIX);
+ undo_block = block;
}
trx_undo_rec_get_pars(
@@ -607,7 +603,7 @@ trx_resurrect_table_locks(
tables.insert(table_id);
undo_rec = trx_undo_get_prev_rec(
- undo_rec, undo->hdr_page_no,
+ block, page_offset(undo_rec), undo->hdr_page_no,
undo->hdr_offset, false, &mtr);
} while (undo_rec);
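(Illustrative note, not part of the commit: trx_undo_get_prev_rec() and its callers in trx0trx.cc now pass the current undo page as buf_block_t*&, so that when the traversal crosses to another page the caller sees the new block and can release the latch on the old one. A toy sketch of that by-reference out-parameter pattern, with invented names:)

#include <cstdint>

struct toy_block { uint32_t page_no; toy_block* prev; };

// Step to the previous page, updating the caller's block pointer in place.
// Returns false when there is no previous page.
inline bool toy_step_to_prev(toy_block*& block) {
  if (!block->prev)
    return false;
  block = block->prev;   // caller now refers to the new page
  return true;
}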
diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc
index 60622582480..ba237ce87e0 100644
--- a/storage/innobase/trx/trx0undo.cc
+++ b/storage/innobase/trx/trx0undo.cc
@@ -105,37 +105,36 @@ trx_undo_mem_create(
trx_id_t trx_id, /*!< in: id of the trx for which the undo log
is created */
const XID* xid, /*!< in: X/Open XA transaction identification*/
- ulint page_no,/*!< in: undo log header page number */
- ulint offset);/*!< in: undo log header byte offset on page */
+ uint32_t page_no,/*!< in: undo log header page number */
+ uint16_t offset);/*!< in: undo log header byte offset on page */
/** Determine the start offset of undo log records of an undo log page.
-@param[in] undo_page undo log page
+@param[in] block undo log page
@param[in] page_no undo log header page number
@param[in] offset undo log header offset
@return start offset */
static
-uint16_t
-trx_undo_page_get_start(const page_t* undo_page, ulint page_no, ulint offset)
+uint16_t trx_undo_page_get_start(const buf_block_t *block, uint32_t page_no,
+ uint16_t offset)
{
- return page_no == page_get_page_no(undo_page)
- ? mach_read_from_2(offset + TRX_UNDO_LOG_START + undo_page)
- : TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE;
+ return page_no == block->page.id.page_no()
+ ? mach_read_from_2(offset + TRX_UNDO_LOG_START + block->frame)
+ : TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE;
}
/** Get the first undo log record on a page.
-@param[in] page undo log page
+@param[in] block undo log page
@param[in] page_no undo log header page number
@param[in] offset undo log header page offset
@return pointer to first record
@retval NULL if none exists */
-static
-trx_undo_rec_t*
-trx_undo_page_get_first_rec(page_t* page, ulint page_no, ulint offset)
+static trx_undo_rec_t*
+trx_undo_page_get_first_rec(const buf_block_t *block, uint32_t page_no,
+ uint16_t offset)
{
- ulint start = trx_undo_page_get_start(page, page_no, offset);
- return start == trx_undo_page_get_end(page, page_no, offset)
- ? NULL
- : page + start;
+ uint16_t start= trx_undo_page_get_start(block, page_no, offset);
+ return start == trx_undo_page_get_end(block, page_no, offset)
+ ? nullptr : block->frame + start;
}
/** Get the last undo log record on a page.
@@ -146,58 +145,43 @@ trx_undo_page_get_first_rec(page_t* page, ulint page_no, ulint offset)
@retval NULL if none exists */
static
trx_undo_rec_t*
-trx_undo_page_get_last_rec(page_t* page, ulint page_no, ulint offset)
+trx_undo_page_get_last_rec(const buf_block_t *block, uint32_t page_no,
+ uint16_t offset)
{
- ulint end = trx_undo_page_get_end(page, page_no, offset);
-
- return trx_undo_page_get_start(page, page_no, offset) == end
- ? NULL
- : page + mach_read_from_2(page + end - 2);
+ uint16_t end= trx_undo_page_get_end(block, page_no, offset);
+ return trx_undo_page_get_start(block, page_no, offset) == end
+ ? nullptr : block->frame + mach_read_from_2(block->frame + end - 2);
}
-/***********************************************************************//**
-Gets the previous record in an undo log from the previous page.
-@return undo log record, the page s-latched, NULL if none */
-static
-trx_undo_rec_t*
-trx_undo_get_prev_rec_from_prev_page(
-/*=================================*/
- trx_undo_rec_t* rec, /*!< in: undo record */
- ulint page_no,/*!< in: undo log header page number */
- ulint offset, /*!< in: undo log header offset on page */
- bool shared, /*!< in: true=S-latch, false=X-latch */
- mtr_t* mtr) /*!< in: mtr */
+/** Get the previous record in an undo log from the previous page.
+@param[in,out] block undo log page
+@param[in] rec undo record offset in the page
+@param[in] page_no undo log header page number
+@param[in] offset undo log header offset on page
+@param[in] shared latching mode: true=RW_S_LATCH, false=RW_X_LATCH
+@param[in,out] mtr mini-transaction
+@return undo log record, the page latched, NULL if none */
+static trx_undo_rec_t*
+trx_undo_get_prev_rec_from_prev_page(buf_block_t *&block, uint16_t rec,
+ uint32_t page_no, uint16_t offset,
+ bool shared, mtr_t *mtr)
{
- ulint space;
- ulint prev_page_no;
- page_t* prev_page;
- page_t* undo_page;
+ uint32_t prev_page_no= flst_get_prev_addr(TRX_UNDO_PAGE_HDR +
+ TRX_UNDO_PAGE_NODE +
+ block->frame).page;
- undo_page = page_align(rec);
+ if (prev_page_no == FIL_NULL)
+ return NULL;
- prev_page_no = flst_get_prev_addr(undo_page + TRX_UNDO_PAGE_HDR
- + TRX_UNDO_PAGE_NODE)
- .page;
+ block = buf_page_get(page_id_t(block->page.id.space(), prev_page_no),
+ 0, shared ? RW_S_LATCH : RW_X_LATCH, mtr);
+ buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
- if (prev_page_no == FIL_NULL) {
-
- return(NULL);
- }
-
- space = page_get_space_id(undo_page);
-
- buf_block_t* block = buf_page_get(
- page_id_t(space, prev_page_no), 0,
- shared ? RW_S_LATCH : RW_X_LATCH, mtr);
-
- buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
-
- prev_page = buf_block_get_frame(block);
-
- return(trx_undo_page_get_last_rec(prev_page, page_no, offset));
+ return trx_undo_page_get_last_rec(block, page_no, offset);
}
/** Get the previous undo log record.
+@param[in] block undo log page
@param[in] rec undo log record
@param[in] page_no undo log header page number
@param[in] offset undo log header page offset
@@ -205,169 +189,105 @@ trx_undo_get_prev_rec_from_prev_page(
@retval NULL if none */
static
trx_undo_rec_t*
-trx_undo_page_get_prev_rec(trx_undo_rec_t* rec, ulint page_no, ulint offset)
+trx_undo_page_get_prev_rec(const buf_block_t *block, trx_undo_rec_t *rec,
+ uint32_t page_no, uint16_t offset)
{
- page_t* undo_page;
- ulint start;
-
- undo_page = (page_t*) ut_align_down(rec, srv_page_size);
-
- start = trx_undo_page_get_start(undo_page, page_no, offset);
-
- if (start + undo_page == rec) {
-
- return(NULL);
- }
-
- return(undo_page + mach_read_from_2(rec - 2));
+ ut_ad(block->frame == page_align(rec));
+ return rec == block->frame + trx_undo_page_get_start(block, page_no, offset)
+ ? nullptr
+ : block->frame + mach_read_from_2(rec - 2);
}
-/***********************************************************************//**
-Gets the previous record in an undo log.
-@return undo log record, the page s-latched, NULL if none */
+/** Get the previous record in an undo log.
+@param[in,out] block undo log page
+@param[in] rec undo record offset in the page
+@param[in] page_no undo log header page number
+@param[in] offset undo log header offset on page
+@param[in] shared latching mode: true=RW_S_LATCH, false=RW_X_LATCH
+@param[in,out] mtr mini-transaction
+@return undo log record, the page latched, NULL if none */
trx_undo_rec_t*
-trx_undo_get_prev_rec(
-/*==================*/
- trx_undo_rec_t* rec, /*!< in: undo record */
- ulint page_no,/*!< in: undo log header page number */
- ulint offset, /*!< in: undo log header offset on page */
- bool shared, /*!< in: true=S-latch, false=X-latch */
- mtr_t* mtr) /*!< in: mtr */
+trx_undo_get_prev_rec(buf_block_t *&block, uint16_t rec, uint32_t page_no,
+ uint16_t offset, bool shared, mtr_t *mtr)
{
- trx_undo_rec_t* prev_rec;
-
- prev_rec = trx_undo_page_get_prev_rec(rec, page_no, offset);
+ if (trx_undo_rec_t *prev= trx_undo_page_get_prev_rec(block,
+ block->frame + rec,
+ page_no, offset))
+ return prev;
- if (prev_rec) {
-
- return(prev_rec);
- }
+ /* We have to go to the previous undo log page to look for the
+ previous record */
- /* We have to go to the previous undo log page to look for the
- previous record */
-
- return(trx_undo_get_prev_rec_from_prev_page(rec, page_no, offset,
- shared, mtr));
+ return trx_undo_get_prev_rec_from_prev_page(block, rec, page_no, offset,
+ shared, mtr);
}
-/** Gets the next record in an undo log from the next page.
-@param[in] space undo log header space
-@param[in] undo_page undo log page
-@param[in] page_no undo log header page number
-@param[in] offset undo log header offset on page
-@param[in] mode latch mode: RW_S_LATCH or RW_X_LATCH
-@param[in,out] mtr mini-transaction
+/** Get the next record in an undo log from the next page.
+@param[in,out] block undo log page
+@param[in] page_no undo log header page number
+@param[in] offset undo log header offset on page
+@param[in] mode latching mode: RW_S_LATCH or RW_X_LATCH
+@param[in,out] mtr mini-transaction
@return undo log record, the page latched, NULL if none */
-static
-trx_undo_rec_t*
-trx_undo_get_next_rec_from_next_page(
- ulint space,
- const page_t* undo_page,
- ulint page_no,
- ulint offset,
- ulint mode,
- mtr_t* mtr)
+static trx_undo_rec_t*
+trx_undo_get_next_rec_from_next_page(buf_block_t *&block, uint32_t page_no,
+ uint16_t offset, ulint mode, mtr_t *mtr)
{
- const trx_ulogf_t* log_hdr;
- ulint next_page_no;
- page_t* next_page;
- ulint next;
-
- if (page_no == page_get_page_no(undo_page)) {
-
- log_hdr = undo_page + offset;
- next = mach_read_from_2(log_hdr + TRX_UNDO_NEXT_LOG);
-
- if (next != 0) {
-
- return(NULL);
- }
- }
-
- next_page_no = flst_get_next_addr(TRX_UNDO_PAGE_HDR
- + TRX_UNDO_PAGE_NODE + undo_page)
- .page;
- if (next_page_no == FIL_NULL) {
-
- return(NULL);
- }
+ if (page_no == block->page.id.page_no() &&
+ mach_read_from_2(block->frame + offset + TRX_UNDO_NEXT_LOG))
+ return NULL;
- const page_id_t next_page_id(space, next_page_no);
+ ulint next= flst_get_next_addr(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE +
+ block->frame).page;
+ if (next == FIL_NULL)
+ return NULL;
- if (mode == RW_S_LATCH) {
- next_page = trx_undo_page_get_s_latched(
- next_page_id, mtr);
- } else {
- ut_ad(mode == RW_X_LATCH);
- next_page = trx_undo_page_get(next_page_id, mtr);
- }
+ block= buf_page_get(page_id_t(block->page.id.space(), next), 0, mode, mtr);
+ buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
- return(trx_undo_page_get_first_rec(next_page, page_no, offset));
+ return trx_undo_page_get_first_rec(block, page_no, offset);
}
-/***********************************************************************//**
-Gets the next record in an undo log.
-@return undo log record, the page s-latched, NULL if none */
+/** Get the next record in an undo log.
+@param[in,out] block undo log page
+@param[in] rec undo record offset in the page
+@param[in] page_no undo log header page number
+@param[in] offset undo log header offset on page
+@param[in,out] mtr mini-transaction
+@return undo log record, the page latched, NULL if none */
trx_undo_rec_t*
-trx_undo_get_next_rec(
-/*==================*/
- trx_undo_rec_t* rec, /*!< in: undo record */
- ulint page_no,/*!< in: undo log header page number */
- ulint offset, /*!< in: undo log header offset on page */
- mtr_t* mtr) /*!< in: mtr */
+trx_undo_get_next_rec(buf_block_t *&block, uint16_t rec, uint32_t page_no,
+ uint16_t offset, mtr_t *mtr)
{
- ulint space;
- trx_undo_rec_t* next_rec;
+ if (trx_undo_rec_t *next= trx_undo_page_get_next_rec(block, rec, page_no,
+ offset))
+ return next;
- next_rec = trx_undo_page_get_next_rec(rec, page_no, offset);
-
- if (next_rec) {
- return(next_rec);
- }
-
- space = page_get_space_id(page_align(rec));
-
- return(trx_undo_get_next_rec_from_next_page(space,
- page_align(rec),
- page_no, offset,
- RW_S_LATCH, mtr));
+ return trx_undo_get_next_rec_from_next_page(block, page_no, offset,
+ RW_S_LATCH, mtr);
}
-/** Gets the first record in an undo log.
-@param[in] space undo log header space
-@param[in] page_no undo log header page number
-@param[in] offset undo log header offset on page
-@param[in] mode latching mode: RW_S_LATCH or RW_X_LATCH
-@param[in,out] mtr mini-transaction
+/** Get the first record in an undo log.
+@param[in] space undo log header space
+@param[in] page_no undo log header page number
+@param[in] offset undo log header offset on page
+@param[in] mode latching mode: RW_S_LATCH or RW_X_LATCH
+@param[out] block undo log page
+@param[in,out] mtr mini-transaction
@return undo log record, the page latched, NULL if none */
trx_undo_rec_t*
-trx_undo_get_first_rec(
- fil_space_t* space,
- ulint page_no,
- ulint offset,
- ulint mode,
- mtr_t* mtr)
+trx_undo_get_first_rec(const fil_space_t &space, uint32_t page_no,
+ uint16_t offset, ulint mode, buf_block_t*& block,
+ mtr_t *mtr)
{
- page_t* undo_page;
- trx_undo_rec_t* rec;
-
- const page_id_t page_id(space->id, page_no);
-
- if (mode == RW_S_LATCH) {
- undo_page = trx_undo_page_get_s_latched(page_id, mtr);
- } else {
- undo_page = trx_undo_page_get(page_id, mtr);
- }
-
- rec = trx_undo_page_get_first_rec(undo_page, page_no, offset);
+ block = buf_page_get(page_id_t(space.id, page_no), 0, mode, mtr);
+ buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
- if (rec) {
- return(rec);
- }
+ if (trx_undo_rec_t *rec= trx_undo_page_get_first_rec(block, page_no, offset))
+ return rec;
- return(trx_undo_get_next_rec_from_next_page(space->id,
- undo_page, page_no, offset,
- mode, mtr));
+ return trx_undo_get_next_rec_from_next_page(block, page_no, offset, mode,
+ mtr);
}
/*============== UNDO LOG FILE COPY CREATION AND FREEING ==================*/
@@ -455,9 +375,9 @@ trx_undo_parse_page_header_reuse(
}
/** Initialize the fields in an undo log segment page.
-@param[in,out] undo_block undo page
+@param[in,out] undo_block undo log segment page
@param[in,out] mtr mini-transaction */
-static void trx_undo_page_init(buf_block_t* undo_block, mtr_t* mtr)
+static void trx_undo_page_init(const buf_block_t *undo_block, mtr_t *mtr)
{
page_t* page = undo_block->frame;
mach_write_to_2(FIL_PAGE_TYPE + page, FIL_PAGE_UNDO_LOG);
@@ -489,6 +409,26 @@ static void trx_undo_page_init(buf_block_t* undo_block, mtr_t* mtr)
mlog_close(mtr, log_ptr);
}
+/** Look for a free slot for an undo log segment.
+@param rseg_header rollback segment header
+@return slot index
+@retval ULINT_UNDEFINED if not found */
+static ulint trx_rsegf_undo_find_free(const buf_block_t *rseg_header)
+{
+ ulint max_slots= TRX_RSEG_N_SLOTS;
+
+#ifdef UNIV_DEBUG
+ if (trx_rseg_n_slots_debug)
+ max_slots= std::min<ulint>(trx_rseg_n_slots_debug, TRX_RSEG_N_SLOTS);
+#endif
+
+ for (ulint i= 0; i < max_slots; i++)
+ if (trx_rsegf_get_nth_undo(rseg_header, i) == FIL_NULL)
+ return i;
+
+ return ULINT_UNDEFINED;
+}
+
/** Create an undo log segment.
@param[in,out] space tablespace
@param[in,out] rseg_hdr rollback segment header (x-latched)
@@ -499,15 +439,14 @@ static void trx_undo_page_init(buf_block_t* undo_block, mtr_t* mtr)
@retval NULL on failure */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
buf_block_t*
-trx_undo_seg_create(fil_space_t* space, trx_rsegf_t* rseg_hdr, ulint* id,
- dberr_t* err, mtr_t* mtr)
+trx_undo_seg_create(fil_space_t *space, buf_block_t *rseg_hdr, ulint *id,
+ dberr_t *err, mtr_t *mtr)
{
- ulint slot_no;
buf_block_t* block;
ulint n_reserved;
bool success;
- slot_no = trx_rsegf_undo_find_free(rseg_hdr);
+ const ulint slot_no = trx_rsegf_undo_find_free(rseg_hdr);
if (slot_no == ULINT_UNDEFINED) {
ib::warn() << "Cannot find a free slot for an undo log. Do"
@@ -518,6 +457,8 @@ trx_undo_seg_create(fil_space_t* space, trx_rsegf_t* rseg_hdr, ulint* id,
return NULL;
}
+ ut_ad(slot_no < TRX_RSEG_N_SLOTS);
+
success = fsp_reserve_free_extents(&n_reserved, space, 2, FSP_UNDO,
mtr);
if (!success) {
@@ -540,22 +481,22 @@ trx_undo_seg_create(fil_space_t* space, trx_rsegf_t* rseg_hdr, ulint* id,
trx_undo_page_init(block, mtr);
- mlog_write_ulint(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + block->frame,
- TRX_UNDO_SEG_HDR + TRX_UNDO_SEG_HDR_SIZE,
- MLOG_2BYTES, mtr);
-
- mlog_write_ulint(TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG + block->frame,
- 0, MLOG_2BYTES, mtr);
+ mtr->write<2,mtr_t::OPT>(*block, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ + block->frame,
+ TRX_UNDO_SEG_HDR + TRX_UNDO_SEG_HDR_SIZE);
+ mtr->write<2,mtr_t::OPT>(*block, TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG
+ + block->frame, 0U);
- flst_init(TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + block->frame, mtr);
+ flst_init(*block, TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + block->frame,
+ mtr);
- flst_add_last(TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + block->frame,
- TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE + block->frame,
- mtr);
+ flst_add_last(block, TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST,
+ block, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr);
*id = slot_no;
- trx_rsegf_set_nth_undo(rseg_hdr, slot_no, block->page.id.page_no(),
- mtr);
+ mtr->write<4>(*rseg_hdr, TRX_RSEG + TRX_RSEG_UNDO_SLOTS
+ + slot_no * TRX_RSEG_SLOT_SIZE + rseg_hdr->frame,
+ block->page.id.page_no());
MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);
@@ -563,77 +504,43 @@ trx_undo_seg_create(fil_space_t* space, trx_rsegf_t* rseg_hdr, ulint* id,
return block;
}
-/**********************************************************************//**
-Writes the mtr log entry of an undo log header initialization. */
-UNIV_INLINE
-void
-trx_undo_header_create_log(
-/*=======================*/
- const page_t* undo_page, /*!< in: undo log header page */
- trx_id_t trx_id, /*!< in: transaction id */
- mtr_t* mtr) /*!< in: mtr */
-{
- mlog_write_initial_log_record(undo_page, MLOG_UNDO_HDR_CREATE, mtr);
-
- mlog_catenate_ull_compressed(mtr, trx_id);
-}
-
/***************************************************************//**
Creates a new undo log header in file. NOTE that this function has its own
log record type MLOG_UNDO_HDR_CREATE. You must NOT change the operation of
this function!
+@param[in,out] undo_page undo log segment header page
+@param[in] trx_id transaction identifier
+@param[in,out] mtr mini-transaction
@return header byte offset on page */
-static
-ulint
-trx_undo_header_create(
-/*===================*/
- page_t* undo_page, /*!< in/out: undo log segment
- header page, x-latched; it is
- assumed that there is
- TRX_UNDO_LOG_XA_HDR_SIZE bytes
- free space on it */
- trx_id_t trx_id, /*!< in: transaction id */
- mtr_t* mtr) /*!< in: mtr */
+static uint16_t trx_undo_header_create(buf_block_t *undo_page, trx_id_t trx_id,
+ mtr_t* mtr)
{
- trx_upagef_t* page_hdr;
- trx_usegf_t* seg_hdr;
- trx_ulogf_t* log_hdr;
- ulint prev_log;
- ulint free;
- ulint new_free;
-
- ut_ad(mtr && undo_page);
-
- page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
- seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
-
- free = mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE);
-
- log_hdr = undo_page + free;
-
- new_free = free + TRX_UNDO_LOG_OLD_HDR_SIZE;
+ byte* page_free = TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ + undo_page->frame;
+ uint16_t free = mach_read_from_2(page_free);
+ uint16_t new_free = free + TRX_UNDO_LOG_OLD_HDR_SIZE;
ut_a(free + TRX_UNDO_LOG_XA_HDR_SIZE < srv_page_size - 100);
- mach_write_to_2(page_hdr + TRX_UNDO_PAGE_START, new_free);
-
- mach_write_to_2(page_hdr + TRX_UNDO_PAGE_FREE, new_free);
+ mach_write_to_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_START
+ + undo_page->frame, new_free);
+ mach_write_to_2(page_free, new_free);
- mach_write_to_2(seg_hdr + TRX_UNDO_STATE, TRX_UNDO_ACTIVE);
+ mach_write_to_2(TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + undo_page->frame,
+ TRX_UNDO_ACTIVE);
+ byte* last_log = TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG
+ + undo_page->frame;
- prev_log = mach_read_from_2(seg_hdr + TRX_UNDO_LAST_LOG);
+ uint16_t prev_log = mach_read_from_2(last_log);
if (prev_log != 0) {
- trx_ulogf_t* prev_log_hdr;
-
- prev_log_hdr = undo_page + prev_log;
-
- mach_write_to_2(prev_log_hdr + TRX_UNDO_NEXT_LOG, free);
+ mach_write_to_2(prev_log + TRX_UNDO_NEXT_LOG + undo_page->frame,
+ free);
}
- mach_write_to_2(seg_hdr + TRX_UNDO_LAST_LOG, free);
+ mach_write_to_2(last_log, free);
- log_hdr = undo_page + free;
+ trx_ulogf_t* log_hdr = undo_page->frame + free;
mach_write_to_2(log_hdr + TRX_UNDO_NEEDS_PURGE, 1);
@@ -647,45 +554,55 @@ trx_undo_header_create(
mach_write_to_2(log_hdr + TRX_UNDO_PREV_LOG, prev_log);
/* Write the log record about the header creation */
- trx_undo_header_create_log(undo_page, trx_id, mtr);
+ mtr->set_modified();
+ if (mtr->get_log_mode() != MTR_LOG_ALL) {
+ ut_ad(mtr->get_log_mode() == MTR_LOG_NONE
+ || mtr->get_log_mode() == MTR_LOG_NO_REDO);
+ return free;
+ }
+
+ byte* log_ptr = mtr->get_log()->open(11 + 15);
+ log_ptr = mlog_write_initial_log_record_low(
+ MLOG_UNDO_HDR_CREATE,
+ undo_page->page.id.space(),
+ undo_page->page.id.page_no(),
+ log_ptr, mtr);
+ log_ptr += mach_u64_write_compressed(log_ptr, trx_id);
+ mlog_close(mtr, log_ptr);
return(free);
}
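trx_undo_header_create() above chains undo log headers through 2-byte offsets: the page free pointer is advanced, the previous last header (if any) gets a forward link, the segment's last-log slot is pointed at the new header, and the new header records its predecessor. The toy program below is an editorial illustration of that linking on a plain byte array; all offsets, sizes and the read_2()/write_2() helpers are invented stand-ins for the TRX_UNDO_* constants and mach_* functions, and it does not reproduce the redo logging.

#include <cstdint>
#include <cassert>

static const unsigned LAST_LOG = 100;   // stand-in for TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG
static const unsigned PAGE_FREE = 102;  // stand-in for TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
static const unsigned HDR_SIZE = 46;    // stand-in for TRX_UNDO_LOG_OLD_HDR_SIZE
static const unsigned NEXT_LOG = 2;     // stand-in for TRX_UNDO_NEXT_LOG
static const unsigned PREV_LOG = 4;     // stand-in for TRX_UNDO_PREV_LOG

static void write_2(unsigned char* p, uint16_t v)
{ p[0] = static_cast<unsigned char>(v >> 8); p[1] = static_cast<unsigned char>(v); }
static uint16_t read_2(const unsigned char* p)
{ return static_cast<uint16_t>(p[0] << 8 | p[1]); }

// Append a new log header at the current free offset and link it in.
static uint16_t append_header(unsigned char* page)
{
  const uint16_t free = read_2(page + PAGE_FREE);
  write_2(page + PAGE_FREE, uint16_t(free + HDR_SIZE)); // reserve header space

  const uint16_t prev = read_2(page + LAST_LOG);
  if (prev != 0)
    write_2(page + prev + NEXT_LOG, free);              // old last header -> new one
  write_2(page + LAST_LOG, free);                       // segment now ends here
  write_2(page + free + PREV_LOG, prev);                // back link (0 = none)
  return free;
}

int main()
{
  unsigned char page[16384] = {0};
  write_2(page + PAGE_FREE, 200);                       // first header goes at 200
  uint16_t h1 = append_header(page);
  uint16_t h2 = append_header(page);
  assert(read_2(page + h1 + NEXT_LOG) == h2);
  assert(read_2(page + LAST_LOG) == h2);
  assert(read_2(page + h2 + PREV_LOG) == h1);
  return 0;
}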
-/********************************************************************//**
-Write X/Open XA Transaction Identification (XID) to undo log header */
-static
-void
-trx_undo_write_xid(
-/*===============*/
- trx_ulogf_t* log_hdr,/*!< in: undo log header */
- const XID* xid, /*!< in: X/Open XA Transaction Identification */
- mtr_t* mtr) /*!< in: mtr */
+/** Write X/Open XA Transaction Identifier (XID) to undo log header
+@param[in,out] block undo header page
+@param[in] offset undo header record offset
+@param[in] xid distributed transaction identifier
+@param[in,out] mtr mini-transaction */
+static void trx_undo_write_xid(buf_block_t *block, uint16_t offset,
+ const XID &xid, mtr_t *mtr)
{
- DBUG_ASSERT(xid->gtrid_length >= 0);
- DBUG_ASSERT(xid->bqual_length >= 0);
- DBUG_ASSERT(xid->gtrid_length + xid->bqual_length < XIDDATASIZE);
-
- mlog_write_ulint(log_hdr + TRX_UNDO_XA_FORMAT,
- static_cast<ulint>(xid->formatID),
- MLOG_4BYTES, mtr);
-
- mlog_write_ulint(log_hdr + TRX_UNDO_XA_TRID_LEN,
- static_cast<ulint>(xid->gtrid_length),
- MLOG_4BYTES, mtr);
-
- mlog_write_ulint(log_hdr + TRX_UNDO_XA_BQUAL_LEN,
- static_cast<ulint>(xid->bqual_length),
- MLOG_4BYTES, mtr);
- const ulint xid_length = static_cast<ulint>(xid->gtrid_length
- + xid->bqual_length);
- mlog_write_string(log_hdr + TRX_UNDO_XA_XID,
- reinterpret_cast<const byte*>(xid->data),
- xid_length, mtr);
- if (UNIV_LIKELY(xid_length < XIDDATASIZE)) {
- mlog_memset(log_hdr + TRX_UNDO_XA_XID + xid_length,
- XIDDATASIZE - xid_length, 0, mtr);
- }
+ DBUG_ASSERT(xid.gtrid_length >= 0);
+ DBUG_ASSERT(xid.bqual_length >= 0);
+ DBUG_ASSERT(xid.gtrid_length + xid.bqual_length < XIDDATASIZE);
+ DBUG_ASSERT(mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG +
+ block->frame) == offset);
+
+ trx_ulogf_t* log_hdr= block->frame + offset;
+
+ mtr->write<4,mtr_t::OPT>(*block, log_hdr + TRX_UNDO_XA_FORMAT,
+ static_cast<uint32_t>(xid.formatID));
+ mtr->write<4,mtr_t::OPT>(*block, log_hdr + TRX_UNDO_XA_TRID_LEN,
+ static_cast<uint32_t>(xid.gtrid_length));
+ mtr->write<4,mtr_t::OPT>(*block, log_hdr + TRX_UNDO_XA_BQUAL_LEN,
+ static_cast<uint32_t>(xid.bqual_length));
+ const ulint xid_length= static_cast<ulint>(xid.gtrid_length
+ + xid.bqual_length);
+ mlog_write_string(log_hdr + TRX_UNDO_XA_XID,
+ reinterpret_cast<const byte*>(xid.data),
+ xid_length, mtr);
+ if (UNIV_LIKELY(xid_length < XIDDATASIZE))
+ mlog_memset(log_hdr + TRX_UNDO_XA_XID + xid_length,
+ XIDDATASIZE - xid_length, 0, mtr);
}
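trx_undo_write_xid() above lays the XID out as three 4-byte fields (format, gtrid length, bqual length) followed by the identifier bytes, with the unused tail of the fixed-size data area zero-filled. The self-contained sketch below shows the same layout; it is editorial, and the field offsets, XID_DATA_SIZE and write_4() are hypothetical placeholders, not the real XIDDATASIZE or mach_write_to_4().

#include <cstdint>
#include <cstring>
#include <cassert>

static const unsigned XID_FORMAT = 0;
static const unsigned XID_TRID_LEN = 4;
static const unsigned XID_BQUAL_LEN = 8;
static const unsigned XID_DATA = 12;
static const unsigned XID_DATA_SIZE = 128;  // stand-in for XIDDATASIZE

static void write_4(unsigned char* p, uint32_t v)
{
  p[0] = static_cast<unsigned char>(v >> 24);
  p[1] = static_cast<unsigned char>(v >> 16);
  p[2] = static_cast<unsigned char>(v >> 8);
  p[3] = static_cast<unsigned char>(v);
}

static void write_xid(unsigned char* hdr, uint32_t format,
                      const char* gtrid, uint32_t gtrid_len,
                      const char* bqual, uint32_t bqual_len)
{
  assert(gtrid_len + bqual_len < XID_DATA_SIZE);
  write_4(hdr + XID_FORMAT, format);
  write_4(hdr + XID_TRID_LEN, gtrid_len);
  write_4(hdr + XID_BQUAL_LEN, bqual_len);
  std::memcpy(hdr + XID_DATA, gtrid, gtrid_len);
  std::memcpy(hdr + XID_DATA + gtrid_len, bqual, bqual_len);
  // Zero the remainder so the fixed-size area has a deterministic value.
  std::memset(hdr + XID_DATA + gtrid_len + bqual_len, 0,
              XID_DATA_SIZE - gtrid_len - bqual_len);
}

int main()
{
  unsigned char hdr[XID_DATA + XID_DATA_SIZE];
  std::memset(hdr, 0xaa, sizeof hdr);    // pretend the area held stale bytes
  write_xid(hdr, 1, "gtrid", 5, "bq", 2);
  assert(hdr[XID_DATA + 7] == 0);        // padding after "gtridbq" is zeroed
  return 0;
}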
/********************************************************************//**
@@ -706,65 +623,50 @@ trx_undo_read_xid(const trx_ulogf_t* log_hdr, XID* xid)
memcpy(xid->data, log_hdr + TRX_UNDO_XA_XID, XIDDATASIZE);
}
-/***************************************************************//**
-Adds space for the XA XID after an undo log old-style header. */
-static
-void
-trx_undo_header_add_space_for_xid(
-/*==============================*/
- page_t* undo_page,/*!< in: undo log segment header page */
- trx_ulogf_t* log_hdr,/*!< in: undo log header */
- mtr_t* mtr) /*!< in: mtr */
+/** Add space for the XA XID after an undo log old-style header.
+@param[in,out] block undo page
+@param[in] offset offset of the undo log header
+@param[in,out] mtr mini-transaction */
+static void trx_undo_header_add_space_for_xid(buf_block_t *block, ulint offset,
+ mtr_t *mtr)
{
- trx_upagef_t* page_hdr;
- ulint free;
- ulint new_free;
-
- page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
-
- free = mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE);
-
- /* free is now the end offset of the old style undo log header */
-
- ut_a(free == (ulint)(log_hdr - undo_page) + TRX_UNDO_LOG_OLD_HDR_SIZE);
-
- new_free = free + (TRX_UNDO_LOG_XA_HDR_SIZE
- - TRX_UNDO_LOG_OLD_HDR_SIZE);
-
- /* Add space for a XID after the header, update the free offset
- fields on the undo log page and in the undo log header */
-
- mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_START, new_free,
- MLOG_2BYTES, mtr);
-
- mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_FREE, new_free,
- MLOG_2BYTES, mtr);
-
- mlog_write_ulint(log_hdr + TRX_UNDO_LOG_START, new_free,
- MLOG_2BYTES, mtr);
+ uint16_t free= mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE +
+ block->frame);
+ /* free is now the end offset of the old style undo log header */
+ ut_a(free == offset + TRX_UNDO_LOG_OLD_HDR_SIZE);
+ free += TRX_UNDO_LOG_XA_HDR_SIZE - TRX_UNDO_LOG_OLD_HDR_SIZE;
+  /* Add space for an XID after the header, update the free offset
+ fields on the undo log page and in the undo log header */
+
+ mtr->write<2>(*block, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_START + block->frame,
+ free);
+ /* MDEV-12353 TODO: use MEMMOVE record */
+ mtr->write<2>(*block, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + block->frame,
+ free);
+ mtr->write<2>(*block, offset + TRX_UNDO_LOG_START + block->frame, free);
}
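The arithmetic in trx_undo_header_add_space_for_xid() can be followed with concrete numbers. The sketch below is editorial and uses invented header sizes in place of TRX_UNDO_LOG_OLD_HDR_SIZE and TRX_UNDO_LOG_XA_HDR_SIZE; it only demonstrates that the grown offset is the single value written back into PAGE_START, PAGE_FREE and the header's LOG_START.

#include <cstdint>
#include <cassert>

int main()
{
  const uint16_t OLD_HDR_SIZE = 46;      // hypothetical value
  const uint16_t XA_HDR_SIZE = 146;      // hypothetical value

  const uint16_t offset = 200;           // where the new header starts
  uint16_t free = offset + OLD_HDR_SIZE; // end of the old-style header

  // Grow the header in place to make room for the XID.
  free += XA_HDR_SIZE - OLD_HDR_SIZE;

  // The function then stores this one value in three places:
  // the page's PAGE_START and PAGE_FREE fields and the header's LOG_START.
  assert(free == offset + XA_HDR_SIZE);
  return 0;
}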
/** Parse the redo log entry of an undo log page header create.
@param[in] ptr redo log record
@param[in] end_ptr end of log buffer
-@param[in,out] page page frame or NULL
+@param[in,out]	block	buffer block or NULL
@param[in,out] mtr mini-transaction or NULL
@return end of log record or NULL */
byte*
trx_undo_parse_page_header(
const byte* ptr,
const byte* end_ptr,
- page_t* page,
+ buf_block_t* block,
mtr_t* mtr)
{
trx_id_t trx_id = mach_u64_parse_compressed(&ptr, end_ptr);
- if (ptr != NULL && page != NULL) {
- trx_undo_header_create(page, trx_id, mtr);
- return(const_cast<byte*>(ptr));
+ if (ptr && block) {
+ trx_undo_header_create(block, trx_id, mtr);
}
- return(const_cast<byte*>(ptr));
+ return const_cast<byte*>(ptr);
}
/** Allocate an undo log page.
@@ -777,7 +679,6 @@ buf_block_t* trx_undo_add_page(trx_undo_t* undo, mtr_t* mtr)
trx_rseg_t* rseg = undo->rseg;
buf_block_t* new_block = NULL;
ulint n_reserved;
- page_t* header_page;
/* When we add a page to an undo log, this is analogous to
a pessimistic insert in a B-tree, and we must reserve the
@@ -785,7 +686,7 @@ buf_block_t* trx_undo_add_page(trx_undo_t* undo, mtr_t* mtr)
mutex_enter(&rseg->mutex);
- header_page = trx_undo_page_get(
+ buf_block_t* header_block = trx_undo_page_get(
page_id_t(undo->rseg->space->id, undo->hdr_page_no), mtr);
if (!fsp_reserve_free_extents(&n_reserved, undo->rseg->space, 1,
@@ -795,7 +696,7 @@ buf_block_t* trx_undo_add_page(trx_undo_t* undo, mtr_t* mtr)
new_block = fseg_alloc_free_page_general(
TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
- + header_page,
+ + header_block->frame,
undo->top_page_no + 1, FSP_UP, TRUE, mtr, mtr);
rseg->space->release_free_extents(n_reserved);
@@ -810,11 +711,8 @@ buf_block_t* trx_undo_add_page(trx_undo_t* undo, mtr_t* mtr)
trx_undo_page_init(new_block, mtr);
- flst_add_last(TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST
- + header_page,
- TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE
- + new_block->frame,
- mtr);
+ flst_add_last(header_block, TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST,
+ new_block, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr);
undo->size++;
rseg->curr_size++;
@@ -827,14 +725,14 @@ func_exit:
Frees an undo log page that is not the header page.
@return last page number in remaining log */
static
-ulint
+uint32_t
trx_undo_free_page(
/*===============*/
trx_rseg_t* rseg, /*!< in: rollback segment */
bool in_history, /*!< in: TRUE if the undo log is in the history
list */
- ulint hdr_page_no, /*!< in: header page number */
- ulint page_no, /*!< in: page number to free: must not be the
+ uint32_t hdr_page_no, /*!< in: header page number */
+ uint32_t page_no, /*!< in: page number to free: must not be the
header page */
mtr_t* mtr) /*!< in: mtr which does not have a latch to any
undo log page; the caller must have reserved
@@ -845,28 +743,31 @@ trx_undo_free_page(
ut_a(hdr_page_no != page_no);
ut_ad(mutex_own(&(rseg->mutex)));
- page_t* undo_page = trx_undo_page_get(page_id_t(space, page_no), mtr);
- page_t* header_page = trx_undo_page_get(page_id_t(space, hdr_page_no),
- mtr);
+ buf_block_t* undo_block = trx_undo_page_get(page_id_t(space, page_no),
+ mtr);
+ buf_block_t* header_block = trx_undo_page_get(page_id_t(space,
+ hdr_page_no),
+ mtr);
- flst_remove(TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + header_page,
- TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE + undo_page, mtr);
+ flst_remove(header_block, TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST,
+ undo_block, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr);
- fseg_free_page(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER + header_page,
+ fseg_free_page(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
+ + header_block->frame,
rseg->space, page_no, false, true, mtr);
const fil_addr_t last_addr = flst_get_last(
- TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + header_page);
+ TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + header_block->frame);
rseg->curr_size--;
if (in_history) {
- trx_rsegf_t* rseg_header = trx_rsegf_get(
+ buf_block_t* rseg_header = trx_rsegf_get(
rseg->space, rseg->page_no, mtr);
- uint32_t hist_size = mach_read_from_4(
- rseg_header + TRX_RSEG_HISTORY_SIZE);
+ byte* rseg_hist_size = TRX_RSEG + TRX_RSEG_HISTORY_SIZE
+ + rseg_header->frame;
+ uint32_t hist_size = mach_read_from_4(rseg_hist_size);
ut_ad(hist_size > 0);
- mlog_write_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE,
- hist_size - 1, MLOG_4BYTES, mtr);
+ mtr->write<4>(*rseg_header, rseg_hist_size, hist_size - 1);
}
return(last_addr.page);
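The history-size update in the hunk above is a plain read-decrement-write of a big-endian 4-byte counter, now expressed through mtr->write<4>(). The standalone snippet below is editorial, not from the patch; read_4()/write_4() are stand-ins for mach_read_from_4() and the mini-transaction write.

#include <cstdint>
#include <cassert>

static uint32_t read_4(const unsigned char* p)
{
  return uint32_t(p[0]) << 24 | uint32_t(p[1]) << 16
       | uint32_t(p[2]) << 8 | uint32_t(p[3]);
}

static void write_4(unsigned char* p, uint32_t v)
{
  p[0] = static_cast<unsigned char>(v >> 24);
  p[1] = static_cast<unsigned char>(v >> 16);
  p[2] = static_cast<unsigned char>(v >> 8);
  p[3] = static_cast<unsigned char>(v);
}

int main()
{
  unsigned char hist_size_field[4];
  write_4(hist_size_field, 7);                 // pages currently counted
  uint32_t hist_size = read_4(hist_size_field);
  assert(hist_size > 0);                       // mirrors the ut_ad() above
  write_4(hist_size_field, hist_size - 1);     // one page was just freed
  assert(read_4(hist_size_field) == 6);
  return 0;
}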
@@ -905,11 +806,11 @@ void trx_undo_truncate_end(trx_undo_t& undo, undo_no_t limit, bool is_temp)
trx_undo_rec_t* trunc_here = NULL;
mutex_enter(&undo.rseg->mutex);
- page_t* undo_page = trx_undo_page_get(
+ buf_block_t* undo_block = trx_undo_page_get(
page_id_t(undo.rseg->space->id, undo.last_page_no),
&mtr);
trx_undo_rec_t* rec = trx_undo_page_get_last_rec(
- undo_page, undo.hdr_page_no, undo.hdr_offset);
+ undo_block, undo.hdr_page_no, undo.hdr_offset);
while (rec) {
if (trx_undo_rec_get_undo_no(rec) < limit) {
goto func_exit;
@@ -917,7 +818,7 @@ void trx_undo_truncate_end(trx_undo_t& undo, undo_no_t limit, bool is_temp)
/* Truncate at least this record off, maybe more */
trunc_here = rec;
- rec = trx_undo_page_get_prev_rec(rec,
+ rec = trx_undo_page_get_prev_rec(undo_block, rec,
undo.hdr_page_no,
undo.hdr_offset);
}
@@ -933,10 +834,10 @@ func_exit:
mutex_exit(&undo.rseg->mutex);
if (trunc_here) {
- mlog_write_ulint(undo_page + TRX_UNDO_PAGE_HDR
- + TRX_UNDO_PAGE_FREE,
- ulint(trunc_here - undo_page),
- MLOG_2BYTES, &mtr);
+ mtr.write<2>(*undo_block,
+ TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ + undo_block->frame,
+ ulint(trunc_here - undo_block->frame));
}
mtr.commit();
@@ -955,14 +856,12 @@ freed, but emptied, if all the records there are below the limit.
void
trx_undo_truncate_start(
trx_rseg_t* rseg,
- ulint hdr_page_no,
- ulint hdr_offset,
+ uint32_t hdr_page_no,
+ uint16_t hdr_offset,
undo_no_t limit)
{
- page_t* undo_page;
trx_undo_rec_t* rec;
trx_undo_rec_t* last_rec;
- ulint page_no;
mtr_t mtr;
ut_ad(mutex_own(&(rseg->mutex)));
@@ -977,42 +876,36 @@ loop:
mtr.set_log_mode(MTR_LOG_NO_REDO);
}
- rec = trx_undo_get_first_rec(rseg->space, hdr_page_no, hdr_offset,
- RW_X_LATCH, &mtr);
+ buf_block_t* undo_page;
+ rec = trx_undo_get_first_rec(*rseg->space, hdr_page_no, hdr_offset,
+ RW_X_LATCH, undo_page, &mtr);
if (rec == NULL) {
/* Already empty */
-
- mtr_commit(&mtr);
-
+done:
+ mtr.commit();
return;
}
- undo_page = page_align(rec);
-
last_rec = trx_undo_page_get_last_rec(undo_page, hdr_page_no,
hdr_offset);
if (trx_undo_rec_get_undo_no(last_rec) >= limit) {
-
- mtr_commit(&mtr);
-
- return;
+ goto done;
}
- page_no = page_get_page_no(undo_page);
-
- if (page_no == hdr_page_no) {
+ if (undo_page->page.id.page_no() == hdr_page_no) {
uint16_t end = mach_read_from_2(hdr_offset + TRX_UNDO_NEXT_LOG
- + undo_page);
+ + undo_page->frame);
if (end == 0) {
end = mach_read_from_2(TRX_UNDO_PAGE_HDR
+ TRX_UNDO_PAGE_FREE
- + undo_page);
+ + undo_page->frame);
}
- mlog_write_ulint(undo_page + hdr_offset + TRX_UNDO_LOG_START,
- end, MLOG_2BYTES, &mtr);
+ mtr.write<2>(*undo_page, undo_page->frame + hdr_offset
+ + TRX_UNDO_LOG_START, end);
} else {
- trx_undo_free_page(rseg, true, hdr_page_no, page_no, &mtr);
+ trx_undo_free_page(rseg, true, hdr_page_no,
+ undo_page->page.id.page_no(), &mtr);
}
mtr_commit(&mtr);
@@ -1023,52 +916,44 @@ loop:
/** Frees an undo log segment which is not in the history list.
@param[in] undo undo log
@param[in] noredo whether the undo tablespace is redo logged */
-static
-void
-trx_undo_seg_free(
- const trx_undo_t* undo,
- bool noredo)
+static void trx_undo_seg_free(const trx_undo_t* undo, bool noredo)
{
- trx_rseg_t* rseg;
- fseg_header_t* file_seg;
- trx_rsegf_t* rseg_header;
- trx_usegf_t* seg_header;
- ibool finished;
- mtr_t mtr;
+ ut_ad(undo->id < TRX_RSEG_N_SLOTS);
- rseg = undo->rseg;
+ trx_rseg_t* const rseg = undo->rseg;
+ bool finished;
+ mtr_t mtr;
do {
-
- mtr_start(&mtr);
+ mtr.start();
if (noredo) {
mtr.set_log_mode(MTR_LOG_NO_REDO);
}
- mutex_enter(&(rseg->mutex));
+ mutex_enter(&rseg->mutex);
- seg_header = trx_undo_page_get(page_id_t(undo->rseg->space->id,
- undo->hdr_page_no),
- &mtr)
- + TRX_UNDO_SEG_HDR;
+ buf_block_t* block = trx_undo_page_get(
+ page_id_t(rseg->space->id, undo->hdr_page_no), &mtr);
- file_seg = seg_header + TRX_UNDO_FSEG_HEADER;
+ fseg_header_t* file_seg = TRX_UNDO_SEG_HDR
+ + TRX_UNDO_FSEG_HEADER + block->frame;
finished = fseg_free_step(file_seg, false, &mtr);
if (finished) {
/* Update the rseg header */
- rseg_header = trx_rsegf_get(
+ buf_block_t* rseg_header = trx_rsegf_get(
rseg->space, rseg->page_no, &mtr);
- trx_rsegf_set_nth_undo(rseg_header, undo->id, FIL_NULL,
- &mtr);
-
+ compile_time_assert(FIL_NULL == 0xffffffff);
+ mlog_memset(rseg_header, TRX_RSEG + TRX_RSEG_UNDO_SLOTS
+ + undo->id * TRX_RSEG_SLOT_SIZE, 4, 0xff,
+ &mtr);
MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_USED);
}
- mutex_exit(&(rseg->mutex));
- mtr_commit(&mtr);
+ mutex_exit(&rseg->mutex);
+ mtr.commit();
} while (!finished);
}
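The slot is released above by filling its four bytes with 0xff, which the compile_time_assert() ties to FIL_NULL == 0xffffffff. The toy check below is editorial; it only demonstrates why an all-ones fill reads back as FIL_NULL regardless of how the bytes are ordered. read_4() is a stand-in for mach_read_from_4().

#include <cstdint>
#include <cstring>
#include <cassert>

static const uint32_t FIL_NULL = 0xffffffff;

static uint32_t read_4(const unsigned char* p)
{
  return uint32_t(p[0]) << 24 | uint32_t(p[1]) << 16
       | uint32_t(p[2]) << 8 | uint32_t(p[3]);
}

int main()
{
  unsigned char slot[4];
  std::memset(slot, 0xff, 4);           // mirrors the 4-byte 0xff memset above
  assert(read_4(slot) == FIL_NULL);     // all-ones is FIL_NULL in any byte order
  return 0;
}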
@@ -1080,9 +965,9 @@ trx_undo_seg_free(
@param[in] page_no undo log segment page number
@param[in,out] max_trx_id the largest observed transaction ID
@return size of the undo log in pages */
-ulint
-trx_undo_mem_create_at_db_start(trx_rseg_t* rseg, ulint id, ulint page_no,
- trx_id_t& max_trx_id)
+uint32_t
+trx_undo_mem_create_at_db_start(trx_rseg_t *rseg, ulint id, uint32_t page_no,
+ trx_id_t &max_trx_id)
{
mtr_t mtr;
XID xid;
@@ -1090,18 +975,18 @@ trx_undo_mem_create_at_db_start(trx_rseg_t* rseg, ulint id, ulint page_no,
ut_ad(id < TRX_RSEG_N_SLOTS);
mtr.start();
- const page_t* undo_page = trx_undo_page_get(
+ const buf_block_t* block = trx_undo_page_get(
page_id_t(rseg->space->id, page_no), &mtr);
const ulint type = mach_read_from_2(
- TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE + undo_page);
+ TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE + block->frame);
ut_ad(type == 0 || type == TRX_UNDO_INSERT || type == TRX_UNDO_UPDATE);
- uint state = mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_STATE
- + undo_page);
- uint offset = mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG
- + undo_page);
+ uint16_t state = mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_STATE
+ + block->frame);
+ uint16_t offset = mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG
+ + block->frame);
- const trx_ulogf_t* undo_header = undo_page + offset;
+ const trx_ulogf_t* undo_header = block->frame + offset;
/* Read X/Open XA transaction identification if it exists, or
set it to NULL. */
@@ -1125,7 +1010,7 @@ trx_undo_mem_create_at_db_start(trx_rseg_t* rseg, ulint id, ulint page_no,
undo->dict_operation = undo_header[TRX_UNDO_DICT_TRANS];
undo->table_id = mach_read_from_8(undo_header + TRX_UNDO_TABLE_ID);
undo->size = flst_get_len(TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST
- + undo_page);
+ + block->frame);
if (UNIV_UNLIKELY(state == TRX_UNDO_TO_FREE)) {
/* This is an old-format insert_undo log segment that
@@ -1143,17 +1028,17 @@ trx_undo_mem_create_at_db_start(trx_rseg_t* rseg, ulint id, ulint page_no,
}
fil_addr_t last_addr = flst_get_last(
- TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + undo_page);
+ TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + block->frame);
undo->last_page_no = last_addr.page;
undo->top_page_no = last_addr.page;
- page_t* last_page = trx_undo_page_get(
+ const buf_block_t* last = trx_undo_page_get(
page_id_t(rseg->space->id, undo->last_page_no), &mtr);
if (const trx_undo_rec_t* rec = trx_undo_page_get_last_rec(
- last_page, page_no, offset)) {
- undo->top_offset = ulint(rec - last_page);
+ last, page_no, offset)) {
+ undo->top_offset = uint16_t(rec - last->frame);
undo->top_undo_no = trx_undo_rec_get_undo_no(rec);
ut_ad(!undo->empty());
} else {
@@ -1189,8 +1074,8 @@ trx_undo_mem_create(
trx_id_t trx_id, /*!< in: id of the trx for which the undo log
is created */
const XID* xid, /*!< in: X/Open transaction identification */
- ulint page_no,/*!< in: undo log header page number */
- ulint offset) /*!< in: undo log header byte offset on page */
+ uint32_t page_no,/*!< in: undo log header page number */
+ uint16_t offset) /*!< in: undo log header byte offset on page */
{
trx_undo_t* undo;
@@ -1238,7 +1123,7 @@ trx_undo_mem_init_for_reuse(
trx_id_t trx_id, /*!< in: id of the trx for which the undo log
is created */
const XID* xid, /*!< in: X/Open XA transaction identification*/
- ulint offset) /*!< in: undo log header byte offset on page */
+ uint16_t offset) /*!< in: undo log header byte offset on page */
{
ut_ad(mutex_own(&((undo->rseg)->mutex)));
@@ -1282,10 +1167,9 @@ trx_undo_create(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
rseg->curr_size++;
- ulint offset = trx_undo_header_create(block->frame, trx->id, mtr);
+ uint16_t offset = trx_undo_header_create(block, trx->id, mtr);
- trx_undo_header_add_space_for_xid(block->frame, block->frame + offset,
- mtr);
+ trx_undo_header_add_space_for_xid(block, offset, mtr);
*undo = trx_undo_mem_create(rseg, id, trx->id, trx->xid,
block->page.id.page_no(), offset);
@@ -1307,10 +1191,10 @@ trx_undo_create(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
case TRX_DICT_OP_TABLE:
(*undo)->table_id = trx->table_id;
(*undo)->dict_operation = TRUE;
- mlog_write_ulint(block->frame + offset + TRX_UNDO_DICT_TRANS,
- TRUE, MLOG_1BYTE, mtr);
- mlog_write_ull(block->frame + offset + TRX_UNDO_TABLE_ID,
- trx->table_id, mtr);
+ mtr->write<1,mtr_t::OPT>(*block, block->frame + offset
+ + TRX_UNDO_DICT_TRANS, 1U);
+ mtr->write<8,mtr_t::OPT>(*block, block->frame + offset
+ + TRX_UNDO_TABLE_ID, trx->table_id);
}
*err = DB_SUCCESS;
@@ -1355,19 +1239,18 @@ trx_undo_reuse_cached(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** pundo,
*pundo = undo;
- ulint offset = trx_undo_header_create(block->frame, trx->id, mtr);
+ uint16_t offset = trx_undo_header_create(block, trx->id, mtr);
/* Reset the TRX_UNDO_PAGE_TYPE in case this page is being
repurposed after upgrading to MariaDB 10.3. */
if (ut_d(ulint type =) UNIV_UNLIKELY(
mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE
+ block->frame))) {
ut_ad(type == TRX_UNDO_INSERT || type == TRX_UNDO_UPDATE);
- mlog_write_ulint(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE
- + block->frame, 0, MLOG_2BYTES, mtr);
+ mtr->write<2>(*block, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE
+ + block->frame, 0U);
}
- trx_undo_header_add_space_for_xid(block->frame, block->frame + offset,
- mtr);
+ trx_undo_header_add_space_for_xid(block, offset, mtr);
trx_undo_mem_init_for_reuse(undo, trx->id, trx->xid, offset);
@@ -1385,10 +1268,10 @@ trx_undo_reuse_cached(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** pundo,
case TRX_DICT_OP_TABLE:
undo->table_id = trx->table_id;
undo->dict_operation = TRUE;
- mlog_write_ulint(block->frame + offset + TRX_UNDO_DICT_TRANS,
- TRUE, MLOG_1BYTE, mtr);
- mlog_write_ull(block->frame + offset + TRX_UNDO_TABLE_ID,
- trx->table_id, mtr);
+ mtr->write<1,mtr_t::OPT>(*block, block->frame + offset
+ + TRX_UNDO_DICT_TRANS, 1U);
+ mtr->write<8,mtr_t::OPT>(*block, block->frame + offset
+ + TRX_UNDO_TABLE_ID, trx->table_id);
}
return block;
@@ -1502,39 +1385,28 @@ func_exit:
/******************************************************************//**
Sets the state of the undo log segment at a transaction finish.
@return undo log segment header page, x-latched */
-page_t*
+buf_block_t*
trx_undo_set_state_at_finish(
/*=========================*/
trx_undo_t* undo, /*!< in: undo log memory copy */
mtr_t* mtr) /*!< in: mtr */
{
- trx_usegf_t* seg_hdr;
- trx_upagef_t* page_hdr;
- page_t* undo_page;
- ulint state;
-
ut_a(undo->id < TRX_RSEG_N_SLOTS);
- undo_page = trx_undo_page_get(
+ buf_block_t* block = trx_undo_page_get(
page_id_t(undo->rseg->space->id, undo->hdr_page_no), mtr);
- seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
- page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
-
- if (undo->size == 1
- && mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE)
- < TRX_UNDO_PAGE_REUSE_LIMIT) {
-
- state = TRX_UNDO_CACHED;
- } else {
- state = TRX_UNDO_TO_PURGE;
- }
+ const uint16_t state = undo->size == 1
+ && TRX_UNDO_PAGE_REUSE_LIMIT
+ > mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE
+ + block->frame)
+ ? TRX_UNDO_CACHED
+ : TRX_UNDO_TO_PURGE;
undo->state = state;
-
- mlog_write_ulint(seg_hdr + TRX_UNDO_STATE, state, MLOG_2BYTES, mtr);
-
- return(undo_page);
+ mtr->write<2>(*block, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE
+ + block->frame, state);
+ return block;
}
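The refactored ternary above chooses TRX_UNDO_CACHED only for a single-page undo log whose free offset is still below TRX_UNDO_PAGE_REUSE_LIMIT, and TRX_UNDO_TO_PURGE otherwise. The sketch below restates that decision as a standalone function; the state codes and the reuse limit are invented placeholders, not the real constants.

#include <cstdint>
#include <cassert>

enum undo_state : uint16_t { UNDO_CACHED = 2, UNDO_TO_PURGE = 3 };  // hypothetical codes

static uint16_t state_at_finish(uint32_t undo_size_in_pages, uint16_t page_free)
{
  const uint16_t REUSE_LIMIT = 3 * 16384 / 4;  // hypothetical limit
  return undo_size_in_pages == 1 && page_free < REUSE_LIMIT
    ? UNDO_CACHED      // small single-page log: keep the segment around
    : UNDO_TO_PURGE;   // otherwise hand it over for purging
}

int main()
{
  assert(state_at_finish(1, 500) == UNDO_CACHED);
  assert(state_at_finish(2, 500) == UNDO_TO_PURGE);
  return 0;
}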
/** Set the state of the undo log segment at a XA PREPARE or XA ROLLBACK.
@@ -1543,32 +1415,19 @@ trx_undo_set_state_at_finish(
@param[in] rollback false=XA PREPARE, true=XA ROLLBACK
-@param[in,out]	mtr	mini-transaction
-@return undo log segment header page, x-latched */
+@param[in,out]	mtr	mini-transaction */
-page_t*
-trx_undo_set_state_at_prepare(
- trx_t* trx,
- trx_undo_t* undo,
- bool rollback,
- mtr_t* mtr)
+void trx_undo_set_state_at_prepare(trx_t *trx, trx_undo_t *undo, bool rollback,
+ mtr_t *mtr)
{
- trx_usegf_t* seg_hdr;
- trx_ulogf_t* undo_header;
- page_t* undo_page;
- ulint offset;
-
- ut_ad(trx && undo && mtr);
-
ut_a(undo->id < TRX_RSEG_N_SLOTS);
- undo_page = trx_undo_page_get(
+ buf_block_t* block = trx_undo_page_get(
page_id_t(undo->rseg->space->id, undo->hdr_page_no), mtr);
- seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
-
if (rollback) {
ut_ad(undo->state == TRX_UNDO_PREPARED);
- mlog_write_ulint(seg_hdr + TRX_UNDO_STATE, TRX_UNDO_ACTIVE,
- MLOG_2BYTES, mtr);
- return(undo_page);
+ mtr->write<2>(*block, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE
+ + block->frame, TRX_UNDO_ACTIVE);
+ return;
}
/*------------------------------*/
@@ -1577,18 +1436,13 @@ trx_undo_set_state_at_prepare(
undo->xid = *trx->xid;
/*------------------------------*/
- mlog_write_ulint(seg_hdr + TRX_UNDO_STATE, undo->state,
- MLOG_2BYTES, mtr);
-
- offset = mach_read_from_2(seg_hdr + TRX_UNDO_LAST_LOG);
- undo_header = undo_page + offset;
-
- mlog_write_ulint(undo_header + TRX_UNDO_XID_EXISTS,
- TRUE, MLOG_1BYTE, mtr);
-
- trx_undo_write_xid(undo_header, &undo->xid, mtr);
+ mtr->write<2>(*block, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + block->frame,
+ undo->state);
+ uint16_t offset = mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG
+ + block->frame);
+ mtr->write<1>(*block, block->frame + offset + TRX_UNDO_XID_EXISTS, 1U);
- return(undo_page);
+ trx_undo_write_xid(block, offset, undo->xid, mtr);
}
/** Free an old insert or temporary undo log after commit or rollback.