author     Marko Mäkelä <marko.makela@mariadb.com>  2020-02-17 10:13:32 +0200
committer  Marko Mäkelä <marko.makela@mariadb.com>  2020-02-17 10:13:32 +0200
commit     fc876980480616613f8eb8125a9b23af7ddf6d7e (patch)
tree       85034326e88dbc0fdfe4a5f5b52c3e37be7677bc
parent     5874aac71f1635ba88636065ed0681b7bf16bd89 (diff)
download   mariadb-git-fc876980480616613f8eb8125a9b23af7ddf6d7e.tar.gz
MDEV-12353: Write less log for BLOB pages
fsp_page_create(): Always initialize the page. The logic to avoid
initialization was made redundant and should have been removed in
mysql/mysql-server@ce0a1e85e24e48b8171f767b44330da635a6ea0a (MySQL 5.7.5).

btr_store_big_rec_extern_fields(): Remove the redundant initialization
of FIL_PAGE_PREV and FIL_PAGE_NEXT. An INIT_PAGE record will have been
written already. Only write the ROW_FORMAT=COMPRESSED page payload from
FIL_PAGE_DATA onwards. We were unnecessarily writing from FIL_PAGE_TYPE
onwards, which caused an assertion failure on recovery:

recv_sys_t::alloc(size_t): Assertion 'len <= srv_page_size' failed

when running the following tests:

./mtr --no-reorder innodb_zip.blob,4k innodb_zip.bug56680,4k
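Why the patch can replace the two-byte mtr.write<2>() of FIL_PAGE_TYPE with a
one-byte mtr.write<1>() at FIL_PAGE_TYPE + 1: on a page that INIT_PAGE has just
zeroed, the high byte of the big-endian type field is already 0, and the BLOB
page type values fit in a single byte. The standalone sketch below illustrates
this; it is not InnoDB code, and the FIL_PAGE_TYPE offset and the
FIL_PAGE_TYPE_ZBLOB value are assumptions taken to match fil0fil.h.

    // Illustration only; not part of the patch. Constants are assumed to
    // match storage/innobase/include/fil0fil.h.
    #include <cassert>
    #include <cstdio>
    #include <cstring>

    static const unsigned FIL_PAGE_TYPE       = 24; // assumed offset of the 2-byte big-endian type field
    static const unsigned FIL_PAGE_TYPE_ZBLOB = 11; // assumed value of the first compressed BLOB page type

    int main()
    {
      unsigned char page[4096];
      std::memset(page, 0, sizeof page);       // INIT_PAGE leaves the header bytes zeroed

      // The type value fits in one byte and the high byte is already 0, so a
      // one-byte write at FIL_PAGE_TYPE + 1 sets the same page type as the
      // former two-byte write at FIL_PAGE_TYPE, while logging less.
      page[FIL_PAGE_TYPE + 1] = FIL_PAGE_TYPE_ZBLOB;

      unsigned type = unsigned(page[FIL_PAGE_TYPE]) << 8 | page[FIL_PAGE_TYPE + 1];
      assert(type == FIL_PAGE_TYPE_ZBLOB);
      std::printf("FIL_PAGE_TYPE=%u\n", type);
      return 0;
    }

Compiles with any C++ compiler (e.g. g++ sketch.cc && ./a.out) and prints
FIL_PAGE_TYPE=11 under the assumed constants.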
-rw-r--r--  storage/innobase/btr/btr0btr.cc      17
-rw-r--r--  storage/innobase/btr/btr0cur.cc      54
-rw-r--r--  storage/innobase/fsp/fsp0fsp.cc     101
-rw-r--r--  storage/innobase/include/btr0btr.h    5
-rw-r--r--  storage/innobase/include/fsp0fsp.h   13
-rw-r--r--  storage/innobase/trx/trx0undo.cc      2
6 files changed, 56 insertions, 136 deletions
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 74980775f8e..3f2f167d242 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -505,10 +505,7 @@ btr_page_alloc_for_ibuf(
/**************************************************************//**
Allocates a new file page to be used in an index tree. NOTE: we assume
that the caller has made the reservation for free extents!
-@retval NULL if no page could be allocated
-@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
-(init_mtr == mtr, or the page was not previously freed in mtr)
-@retval block (not allocated or initialized) otherwise */
+@retval NULL if no page could be allocated */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
buf_block_t*
btr_page_alloc_low(
@@ -523,10 +520,7 @@ btr_page_alloc_low(
for the allocation */
mtr_t* init_mtr) /*!< in/out: mtr or another
mini-transaction in which the
- page should be initialized.
- If init_mtr!=mtr, but the page
- is already X-latched in mtr, do
- not initialize the page. */
+ page should be initialized. */
{
page_t* root = btr_root_get(index, mtr);
@@ -541,7 +535,7 @@ btr_page_alloc_low(
buf_block_t* block = fseg_alloc_free_page_general(
seg_header, hint_page_no, file_direction,
- TRUE, mtr, init_mtr);
+ true, mtr, init_mtr);
#ifdef UNIV_DEBUG_SCRUBBING
if (block != NULL) {
@@ -565,10 +559,7 @@ btr_page_alloc_low(
/**************************************************************//**
Allocates a new file page to be used in an index tree. NOTE: we assume
that the caller has made the reservation for free extents!
-@retval NULL if no page could be allocated
-@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
-(init_mtr == mtr, or the page was not previously freed in mtr)
-@retval block (not allocated or initialized) otherwise */
+@retval NULL if no page could be allocated */
buf_block_t*
btr_page_alloc(
/*===========*/
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index b00d7c6659f..797aaf96e1c 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -7215,7 +7215,6 @@ btr_store_big_rec_extern_fields(
ulint hint_page_no;
ulint i;
mtr_t mtr;
- mtr_t mtr_bulk;
mem_heap_t* heap = NULL;
page_zip_des_t* page_zip;
z_stream c_stream;
@@ -7345,35 +7344,19 @@ btr_store_big_rec_extern_fields(
hint_page_no = prev_page_no + 1;
}
- mtr_t *alloc_mtr;
-
- if (UNIV_UNLIKELY(op == BTR_STORE_INSERT_BULK)) {
- mtr_bulk.start();
- mtr_bulk.set_spaces(mtr);
- alloc_mtr = &mtr_bulk;
- } else {
- alloc_mtr = &mtr;
- }
-
if (!fsp_reserve_free_extents(&r_extents,
index->table->space, 1,
- FSP_BLOB, alloc_mtr,
- 1)) {
-
- alloc_mtr->commit();
+ FSP_BLOB, &mtr, 1)) {
+ mtr.commit();
error = DB_OUT_OF_FILE_SPACE;
goto func_exit;
}
block = btr_page_alloc(index, hint_page_no, FSP_NO_DIR,
- 0, alloc_mtr, &mtr);
+ 0, &mtr, &mtr);
index->table->space->release_free_extents(r_extents);
- if (UNIV_UNLIKELY(op == BTR_STORE_INSERT_BULK)) {
- mtr_bulk.commit();
- }
-
ut_a(block != NULL);
page_no = block->page.id.page_no();
@@ -7411,14 +7394,20 @@ btr_store_big_rec_extern_fields(
row_log_table_blob_alloc(index, page_no);
}
+ ut_ad(!page_has_siblings(block->frame));
+ ut_ad(!fil_page_get_type(block->frame));
+
if (page_zip) {
int err;
page_zip_des_t* blob_page_zip;
- mach_write_to_2(block->frame + FIL_PAGE_TYPE,
- prev_page_no == FIL_NULL
- ? FIL_PAGE_TYPE_ZBLOB
- : FIL_PAGE_TYPE_ZBLOB2);
+ mtr.write<1>(*block,
+ FIL_PAGE_TYPE + 1 + block->frame,
+ prev_page_no == FIL_NULL
+ ? FIL_PAGE_TYPE_ZBLOB
+ : FIL_PAGE_TYPE_ZBLOB2);
+ block->page.zip.data[FIL_PAGE_TYPE + 1]
+ = block->frame[FIL_PAGE_TYPE + 1];
c_stream.next_out = block->frame
+ FIL_PAGE_DATA;
@@ -7430,22 +7419,11 @@ btr_store_big_rec_extern_fields(
ut_a(err == Z_STREAM_END
|| c_stream.avail_out == 0);
- compile_time_assert(FIL_PAGE_NEXT
- == FIL_PAGE_PREV + 4);
- compile_time_assert(FIL_NULL == 0xffffffff);
- mtr.memset(block, FIL_PAGE_PREV, 8, 0xff);
mtr.memcpy(*block,
- FIL_PAGE_TYPE,
+ FIL_PAGE_DATA,
page_zip_get_size(page_zip)
- - FIL_PAGE_TYPE
+ - FIL_PAGE_DATA
- c_stream.avail_out);
- /* Zero out the unused part of the page. */
- if (c_stream.avail_out) {
- mtr.memset(block,
- page_zip_get_size(page_zip)
- - c_stream.avail_out,
- c_stream.avail_out, 0);
- }
/* Copy the page to compressed storage,
because it will be flushed to disk
from there. */
@@ -7505,7 +7483,7 @@ next_zip_page:
break;
}
} else {
- mtr.write<2>(*block, FIL_PAGE_TYPE
+ mtr.write<1>(*block, FIL_PAGE_TYPE + 1
+ block->frame,
FIL_PAGE_TYPE_BLOB);
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index ede8d4f8c16..6eefabdfe52 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -104,14 +104,8 @@ direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR
@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction
@param[in,out] init_mtr mtr or another mini-transaction in
-which the page should be initialized. If init_mtr != mtr, but the page is
-already latched in mtr, do not initialize the page
-@param[in] has_done_reservation TRUE if the space has already been
-reserved, in this case we will never return NULL
-@retval NULL if no page could be allocated
-@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
-(init_mtr == mtr, or the page was not previously freed in mtr)
-@retval block (not allocated or initialized) otherwise */
+which the page should be initialized.
+@retval NULL if no page could be allocated */
static
buf_block_t*
fseg_alloc_free_page_low(
@@ -121,12 +115,12 @@ fseg_alloc_free_page_low(
ulint hint,
byte direction,
rw_lock_type_t rw_latch,
- mtr_t* mtr,
- mtr_t* init_mtr
#ifdef UNIV_DEBUG
- , ibool has_done_reservation
+ bool has_done_reservation,
+ /*!< whether the space has already been reserved */
#endif /* UNIV_DEBUG */
-)
+ mtr_t* mtr,
+ mtr_t* init_mtr)
MY_ATTRIBUTE((warn_unused_result));
/** Get the tablespace header block, SX-latched
@@ -1063,16 +1057,12 @@ fsp_alloc_from_free_frag(buf_block_t *header, buf_block_t *xdes, xdes_t *descr,
}
/** Gets a buffer block for an allocated page.
-NOTE: If init_mtr != mtr, the block will only be initialized if it was
-not previously x-latched. It is assumed that the block has been
-x-latched only by mtr, and freed in mtr in that case.
@param[in,out] space tablespace
@param[in] offset page number of the allocated page
@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction of the allocation
@param[in,out] init_mtr mini-transaction for initializing the page
-@return block, initialized if init_mtr==mtr
-or rw_lock_x_lock_count(&block->lock) == 1 */
+@return block, initialized */
static
buf_block_t*
fsp_page_create(
@@ -1085,34 +1075,21 @@ fsp_page_create(
buf_block_t* block = buf_page_create(page_id_t(space->id, offset),
space->zip_size(), init_mtr);
- ut_d(bool latched = mtr_memo_contains_flagged(mtr, block,
- MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_SX_FIX));
-
- ut_ad(rw_latch == RW_X_LATCH || rw_latch == RW_SX_LATCH);
-
/* Mimic buf_page_get(), but avoid the buf_pool->page_hash lookup. */
+ mtr_memo_type_t memo;
+
if (rw_latch == RW_X_LATCH) {
rw_lock_x_lock(&block->lock);
+ memo = MTR_MEMO_PAGE_X_FIX;
} else {
+ ut_ad(rw_latch == RW_SX_LATCH);
rw_lock_sx_lock(&block->lock);
+ memo = MTR_MEMO_PAGE_SX_FIX;
}
+ mtr_memo_push(init_mtr, block, memo);
buf_block_buf_fix_inc(block, __FILE__, __LINE__);
- mtr_memo_push(init_mtr, block, rw_latch == RW_X_LATCH
- ? MTR_MEMO_PAGE_X_FIX : MTR_MEMO_PAGE_SX_FIX);
-
- if (init_mtr == mtr
- || (rw_latch == RW_X_LATCH
- ? rw_lock_get_x_lock_count(&block->lock) == 1
- : rw_lock_get_sx_lock_count(&block->lock) == 1)) {
-
- /* Initialize the page, unless it was already
- SX-latched in mtr. (In this case, we would want to
- allocate another page that has not been freed in mtr.) */
- ut_ad(init_mtr == mtr || !latched);
- fsp_init_file_page(space, block, init_mtr);
- }
+ fsp_init_file_page(space, block, init_mtr);
return(block);
}
@@ -1125,10 +1102,7 @@ The page is marked as used.
@param[in,out] mtr mini-transaction
@param[in,out] init_mtr mini-transaction in which the page should be
initialized (may be the same as mtr)
-@retval NULL if no page could be allocated
-@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
-(init_mtr == mtr, or the page was not previously freed in mtr)
-@retval block (not allocated or initialized) otherwise */
+@retval NULL if no page could be allocated */
static MY_ATTRIBUTE((warn_unused_result, nonnull))
buf_block_t*
fsp_alloc_free_page(
@@ -1799,11 +1773,10 @@ fseg_create(
block = fseg_alloc_free_page_low(space,
inode, iblock, 0, FSP_UP,
RW_SX_LATCH,
- mtr, mtr
#ifdef UNIV_DEBUG
- , has_done_reservation
+ has_done_reservation,
#endif /* UNIV_DEBUG */
- );
+ mtr, mtr);
/* The allocation cannot fail if we have already reserved a
space for the page. */
@@ -1966,10 +1939,7 @@ not yet taken off it!
@param[out] xdes extent descriptor page
@param[in,out] space tablespace
@param[in,out] mtr mini-transaction
-@retval NULL if no page could be allocated
-@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
-(init_mtr == mtr, or the page was not previously freed in mtr)
-@retval block (not allocated or initialized) otherwise */
+@retval NULL if no page could be allocated */
static
xdes_t*
fseg_alloc_free_extent(
@@ -2033,14 +2003,8 @@ direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR
@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction
@param[in,out] init_mtr mtr or another mini-transaction in
-which the page should be initialized. If init_mtr != mtr, but the page is
-already latched in mtr, do not initialize the page
-@param[in] has_done_reservation TRUE if the space has already been
-reserved, in this case we will never return NULL
-@retval NULL if no page could be allocated
-@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
-(init_mtr == mtr, or the page was not previously freed in mtr)
-@retval block (not allocated or initialized) otherwise */
+which the page should be initialized.
+@retval NULL if no page could be allocated */
static
buf_block_t*
fseg_alloc_free_page_low(
@@ -2050,12 +2014,12 @@ fseg_alloc_free_page_low(
ulint hint,
byte direction,
rw_lock_type_t rw_latch,
- mtr_t* mtr,
- mtr_t* init_mtr
#ifdef UNIV_DEBUG
- , ibool has_done_reservation
+ bool has_done_reservation,
+ /*!< whether the space has already been reserved */
#endif /* UNIV_DEBUG */
-)
+ mtr_t* mtr,
+ mtr_t* init_mtr)
{
ib_id_t seg_id;
ulint used;
@@ -2276,10 +2240,7 @@ got_hinted_page:
Allocates a single free page from a segment. This function implements
the intelligent allocation strategy which tries to minimize file space
fragmentation.
-@retval NULL if no page could be allocated
-@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
-(init_mtr == mtr, or the page was not previously freed in mtr)
-@retval block (not allocated or initialized) otherwise */
+@retval NULL if no page could be allocated */
buf_block_t*
fseg_alloc_free_page_general(
/*=========================*/
@@ -2291,16 +2252,14 @@ fseg_alloc_free_page_general(
inserted there in order, into which
direction they go alphabetically: FSP_DOWN,
FSP_UP, FSP_NO_DIR */
- ibool has_done_reservation, /*!< in: TRUE if the caller has
+ bool has_done_reservation, /*!< in: true if the caller has
already done the reservation for the page
with fsp_reserve_free_extents, then there
is no need to do the check for this individual
page */
mtr_t* mtr, /*!< in/out: mini-transaction */
mtr_t* init_mtr)/*!< in/out: mtr or another mini-transaction
- in which the page should be initialized.
- If init_mtr!=mtr, but the page is already
- latched in mtr, do not initialize the page. */
+ in which the page should be initialized. */
{
fseg_inode_t* inode;
ulint space_id;
@@ -2325,11 +2284,11 @@ fseg_alloc_free_page_general(
block = fseg_alloc_free_page_low(space,
inode, iblock, hint, direction,
- RW_X_LATCH, mtr, init_mtr
+ RW_X_LATCH,
#ifdef UNIV_DEBUG
- , has_done_reservation
+ has_done_reservation,
#endif /* UNIV_DEBUG */
- );
+ mtr, init_mtr);
/* The allocation cannot fail if we have already reserved a
space for the page. */
diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h
index c5b67e4b3a6..f0b6a33d9a9 100644
--- a/storage/innobase/include/btr0btr.h
+++ b/storage/innobase/include/btr0btr.h
@@ -601,10 +601,7 @@ btr_get_size_and_reserved(
/**************************************************************//**
Allocates a new file page to be used in an index tree. NOTE: we assume
that the caller has made the reservation for free extents!
-@retval NULL if no page could be allocated
-@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
-(init_mtr == mtr, or the page was not previously freed in mtr)
-@retval block (not allocated or initialized) otherwise */
+@retval NULL if no page could be allocated */
buf_block_t*
btr_page_alloc(
/*===========*/
diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h
index 1bde90fdad4..eef8f94020f 100644
--- a/storage/innobase/include/fsp0fsp.h
+++ b/storage/innobase/include/fsp0fsp.h
@@ -409,15 +409,12 @@ file space fragmentation.
@return X-latched block, or NULL if no page could be allocated */
#define fseg_alloc_free_page(seg_header, hint, direction, mtr) \
fseg_alloc_free_page_general(seg_header, hint, direction, \
- FALSE, mtr, mtr)
+ false, mtr, mtr)
/**********************************************************************//**
Allocates a single free page from a segment. This function implements
the intelligent allocation strategy which tries to minimize file space
fragmentation.
-@retval NULL if no page could be allocated
-@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
-(init_mtr == mtr, or the page was not previously freed in mtr)
-@retval block (not allocated or initialized) otherwise */
+@retval NULL if no page could be allocated */
buf_block_t*
fseg_alloc_free_page_general(
/*=========================*/
@@ -429,16 +426,14 @@ fseg_alloc_free_page_general(
inserted there in order, into which
direction they go alphabetically: FSP_DOWN,
FSP_UP, FSP_NO_DIR */
- ibool has_done_reservation, /*!< in: TRUE if the caller has
+ bool has_done_reservation, /*!< in: true if the caller has
already done the reservation for the page
with fsp_reserve_free_extents, then there
is no need to do the check for this individual
page */
mtr_t* mtr, /*!< in/out: mini-transaction */
mtr_t* init_mtr)/*!< in/out: mtr or another mini-transaction
- in which the page should be initialized.
- If init_mtr!=mtr, but the page is already
- latched in mtr, do not initialize the page. */
+ in which the page should be initialized. */
MY_ATTRIBUTE((warn_unused_result, nonnull));
/** Reserves free pages from a tablespace. All mini-transactions which may
diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc
index 9d1856a0b27..afe6440c5b7 100644
--- a/storage/innobase/trx/trx0undo.cc
+++ b/storage/innobase/trx/trx0undo.cc
@@ -553,7 +553,7 @@ buf_block_t* trx_undo_add_page(trx_undo_t* undo, mtr_t* mtr)
new_block = fseg_alloc_free_page_general(
TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
+ header_block->frame,
- undo->top_page_no + 1, FSP_UP, TRUE, mtr, mtr);
+ undo->top_page_no + 1, FSP_UP, true, mtr, mtr);
rseg->space->release_free_extents(n_reserved);