author    Marko Mäkelä <marko.makela@mariadb.com>  2020-02-13 19:13:45 +0200
committer Marko Mäkelä <marko.makela@mariadb.com>  2020-02-13 19:13:45 +0200
commit    f8a9f906679e1d1ab026c245f7d24c652050d8b3 (patch)
tree      cb6f6eb922e7af2a139fca3cd8f5292375179c7c /storage/innobase/page
parent    7ae21b18a6b73bbc3bf1ff448faf60c29ac1d386 (diff)
download  mariadb-git-f8a9f906679e1d1ab026c245f7d24c652050d8b3.tar.gz

MDEV-12353: Remove support for crash-upgrade (bb-10.5-MDEV-12353)
We tighten some assertions regarding dict_index_t::is_dummy and crash recovery, now that redo log processing will no longer create dummy objects.
Diffstat (limited to 'storage/innobase/page')
-rw-r--r--  storage/innobase/page/page0cur.cc    992
-rw-r--r--  storage/innobase/page/page0page.cc    56
-rw-r--r--  storage/innobase/page/page0zip.cc    325
3 files changed, 8 insertions, 1365 deletions
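The assertion tightening the commit message refers to is visible in the hunks for page_cur_insert_rec_low(), page_cur_insert_rec_zip() and page_cur_delete_rec() below: the index->is_dummy escape hatch is dropped now that redo log processing no longer builds dummy index objects. A minimal standalone sketch of the before/after pattern follows; PageStub, IndexStub, check_before and check_after are hypothetical names for illustration only, not InnoDB identifiers.

```cpp
// Standalone model of the assertion change; not InnoDB code.
#include <cassert>
#include <cstdint>

struct IndexStub {
  uint64_t id;
  bool     is_dummy;   // formerly set on indexes built during redo log parsing
};

struct PageStub {
  uint64_t index_id;   // models the PAGE_INDEX_ID field in the page header
};

// Before this commit: a dummy index was accepted instead of a matching id.
void check_before(const PageStub& page, const IndexStub& index, bool inside_ibuf) {
  assert(page.index_id == index.id || index.is_dummy || inside_ibuf);
}

// After this commit: only a matching index id or a change-buffer
// mini-transaction is accepted, so the check is strictly tighter.
void check_after(const PageStub& page, const IndexStub& index, bool inside_ibuf) {
  assert(page.index_id == index.id || inside_ibuf);
}

int main() {
  PageStub  page{42};
  IndexStub index{42, false};
  check_before(page, index, false);
  check_after(page, index, false);
  return 0;
}
```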
diff --git a/storage/innobase/page/page0cur.cc b/storage/innobase/page/page0cur.cc
index d6d908a3163..321c5a850cf 100644
--- a/storage/innobase/page/page0cur.cc
+++ b/storage/innobase/page/page0cur.cc
@@ -784,226 +784,18 @@ page_cur_open_on_rnd_user_rec(
ut_rnd_interval(n_recs) + 1);
}
-static void rec_set_heap_no(rec_t *rec, ulint heap_no, bool compact)
-{
- rec_set_bit_field_2(rec, heap_no,
- compact ? REC_NEW_HEAP_NO : REC_OLD_HEAP_NO,
- REC_HEAP_NO_MASK, REC_HEAP_NO_SHIFT);
-}
-
-static rec_t*
-page_cur_parse_insert_rec_zip(
- page_cur_t* cursor, /*!< in/out: page cursor */
- dict_index_t* index, /*!< in: record descriptor */
- const rec_t* rec, /*!< in: pointer to a physical record */
- offset_t* offsets,/*!< in/out: rec_get_offsets(rec, index) */
- mtr_t* mtr); /*!< in/out: mini-transaction */
-/***********************************************************//**
-Parses a log record of a record insert on a page.
-@return end of log record or NULL */
-ATTRIBUTE_COLD /* only used when crash-upgrading */
-const byte*
-page_cur_parse_insert_rec(
-/*======================*/
- bool is_short,/*!< in: true if short inserts */
- const byte* ptr, /*!< in: buffer */
- const byte* end_ptr,/*!< in: buffer end */
- buf_block_t* block, /*!< in: page or NULL */
- dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr) /*!< in: mtr or NULL */
-{
- ulint origin_offset = 0; /* remove warning */
- ulint end_seg_len;
- ulint mismatch_index = 0; /* remove warning */
- page_t* page;
- rec_t* cursor_rec;
- byte buf1[1024];
- byte* buf;
- const byte* ptr2 = ptr;
- ulint info_and_status_bits = 0; /* remove warning */
- page_cur_t cursor;
- mem_heap_t* heap = NULL;
- offset_t offsets_[REC_OFFS_NORMAL_SIZE];
- offset_t* offsets = offsets_;
- rec_offs_init(offsets_);
-
- page = block ? buf_block_get_frame(block) : NULL;
-
- if (is_short) {
- cursor_rec = page_rec_get_prev(page_get_supremum_rec(page));
- } else {
- ulint offset;
-
- /* Read the cursor rec offset as a 2-byte ulint */
-
- if (UNIV_UNLIKELY(end_ptr < ptr + 2)) {
-
- return(NULL);
- }
-
- offset = mach_read_from_2(ptr);
- ptr += 2;
-
- cursor_rec = page + offset;
-
- if (offset >= srv_page_size) {
-
- recv_sys.found_corrupt_log = TRUE;
-
- return(NULL);
- }
- }
-
- end_seg_len = mach_parse_compressed(&ptr, end_ptr);
-
- if (ptr == NULL) {
-
- return(NULL);
- }
-
- if (end_seg_len >= srv_page_size << 1) {
- recv_sys.found_corrupt_log = TRUE;
-
- return(NULL);
- }
-
- if (end_seg_len & 0x1UL) {
- /* Read the info bits */
-
- if (end_ptr < ptr + 1) {
-
- return(NULL);
- }
-
- info_and_status_bits = mach_read_from_1(ptr);
- ptr++;
-
- origin_offset = mach_parse_compressed(&ptr, end_ptr);
-
- if (ptr == NULL) {
-
- return(NULL);
- }
-
- ut_a(origin_offset < srv_page_size);
-
- mismatch_index = mach_parse_compressed(&ptr, end_ptr);
-
- if (ptr == NULL) {
-
- return(NULL);
- }
-
- ut_a(mismatch_index < srv_page_size);
- }
-
- if (end_ptr < ptr + (end_seg_len >> 1)) {
-
- return(NULL);
- }
-
- if (!block) {
-
- return(const_cast<byte*>(ptr + (end_seg_len >> 1)));
- }
-
- ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
- ut_ad(!buf_block_get_page_zip(block) || page_is_comp(page));
-
- /* Read from the log the inserted index record end segment which
- differs from the cursor record */
-
- const bool is_leaf = page_is_leaf(page);
-
- offsets = rec_get_offsets(cursor_rec, index, offsets, is_leaf,
- ULINT_UNDEFINED, &heap);
-
- if (!(end_seg_len & 0x1UL)) {
- info_and_status_bits = rec_get_info_and_status_bits(
- cursor_rec, page_is_comp(page));
- origin_offset = rec_offs_extra_size(offsets);
- mismatch_index = rec_offs_size(offsets) - (end_seg_len >> 1);
- }
-
- end_seg_len >>= 1;
-
- if (mismatch_index + end_seg_len < sizeof buf1) {
- buf = buf1;
- } else {
- buf = static_cast<byte*>(
- ut_malloc_nokey(mismatch_index + end_seg_len));
- }
-
- /* Build the inserted record to buf */
-
- if (UNIV_UNLIKELY(mismatch_index >= srv_page_size)) {
-
- ib::fatal() << "is_short " << is_short << ", "
- << "info_and_status_bits " << info_and_status_bits
- << ", offset " << page_offset(cursor_rec) << ","
- " o_offset " << origin_offset << ", mismatch index "
- << mismatch_index << ", end_seg_len " << end_seg_len
- << " parsed len " << (ptr - ptr2);
- }
-
- memcpy(buf, rec_get_start(cursor_rec, offsets), mismatch_index);
- memcpy(buf + mismatch_index, ptr, end_seg_len);
- rec_set_heap_no(buf + origin_offset, PAGE_HEAP_NO_USER_LOW,
- page_is_comp(page));
-
- if (page_is_comp(page)) {
- rec_set_info_and_status_bits(buf + origin_offset,
- info_and_status_bits);
- } else {
- rec_set_bit_field_1(buf + origin_offset, info_and_status_bits,
- REC_OLD_INFO_BITS,
- REC_INFO_BITS_MASK, REC_INFO_BITS_SHIFT);
- }
-
- page_cur_position(cursor_rec, block, &cursor);
-
- offsets = rec_get_offsets(buf + origin_offset, index, offsets,
- is_leaf, ULINT_UNDEFINED, &heap);
- /* The redo log record should only have been written
- after the write was successful. */
- if (block->page.zip.data) {
- if (!page_cur_parse_insert_rec_zip(&cursor, index,
- buf + origin_offset,
- offsets, mtr)) {
- ut_error;
- }
- } else if (!page_cur_insert_rec_low(&cursor, index,
- buf + origin_offset,
- offsets, mtr)) {
- ut_error;
- }
-
- if (buf != buf1) {
-
- ut_free(buf);
- }
-
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
-
- return(const_cast<byte*>(ptr + end_seg_len));
-}
-
/**
Set the owned records field of the record pointed to by a directory slot.
-@tparam compressed whether to update any ROW_FORMAT=COMPRESSED page as well
@param[in,out] block file page
@param[in] slot sparse directory slot
@param[in,out] n number of records owned by the directory slot
@param[in,out] mtr mini-transaction */
-template<bool compressed>
static void page_dir_slot_set_n_owned(buf_block_t *block,
const page_dir_slot_t *slot,
ulint n, mtr_t *mtr)
{
rec_t *rec= const_cast<rec_t*>(page_dir_slot_get_rec(slot));
- page_rec_set_n_owned<compressed>(block, rec, n, page_rec_is_comp(rec), mtr);
+ page_rec_set_n_owned<true>(block, rec, n, page_rec_is_comp(rec), mtr);
}
/**
@@ -1111,10 +903,10 @@ static void page_dir_balance_slot(buf_block_t *block, ulint s, mtr_t *mtr)
<= PAGE_DIR_SLOT_MAX_N_OWNED);
/* Merge the slots. */
ulint n_owned = page_dir_slot_get_n_owned(slot);
- page_dir_slot_set_n_owned<true>(block, slot, 0, mtr);
- page_dir_slot_set_n_owned<true>(block, up_slot, n_owned
- + page_dir_slot_get_n_owned(
- up_slot), mtr);
+ page_dir_slot_set_n_owned(block, slot, 0, mtr);
+ page_dir_slot_set_n_owned(block, up_slot, n_owned
+ + page_dir_slot_get_n_owned(up_slot),
+ mtr);
/* Shift the slots */
page_dir_slot_t* last_slot = page_dir_get_nth_slot(
block->frame, n_slots - 1);
@@ -1165,7 +957,7 @@ static void page_dir_balance_slot(buf_block_t *block, ulint s, mtr_t *mtr)
mtr->write<2>(*block, slot, page_offset(new_rec));
func_exit:
- page_dir_slot_set_n_owned<true>(block, up_slot, up_n_owned - 1, mtr);
+ page_dir_slot_set_n_owned(block, up_slot, up_n_owned - 1, mtr);
}
/** Allocate space for inserting an index record.
@@ -1235,7 +1027,6 @@ page_cur_insert_rec_low(
ut_ad(fil_page_index_page_check(block->frame));
ut_ad(mach_read_from_8(PAGE_HEADER + PAGE_INDEX_ID + block->frame) ==
index->id ||
- index->is_dummy ||
mtr->is_inside_ibuf());
ut_ad(!page_rec_is_supremum(cur->rec));
@@ -1643,9 +1434,7 @@ page_cur_insert_rec_zip(
ut_ad(fil_page_get_type(cursor->block->frame) == FIL_PAGE_INDEX ||
fil_page_get_type(cursor->block->frame) == FIL_PAGE_RTREE);
ut_ad(mach_read_from_8(PAGE_HEADER + PAGE_INDEX_ID + cursor->block->frame) ==
- index->id ||
- index->is_dummy ||
- mtr->is_inside_ibuf());
+ index->id || mtr->is_inside_ibuf());
ut_ad(!page_get_instant(cursor->block->frame));
ut_ad(!page_cur_is_after_last(cursor));
#ifdef UNIV_ZIP_DEBUG
@@ -1977,767 +1766,6 @@ inc_dir:
return insert_rec;
}
-/** Increment PAGE_N_DIRECTION.
-@param[in,out] block ROW_FORMAT=COMPRESSED index page
-@param[in,out] ptr the PAGE_DIRECTION_B field
-@param[in] dir PAGE_RIGHT or PAGE_LEFT */
-static inline void page_direction_increment(buf_block_t *block, byte *ptr,
- uint dir)
-{
- ut_ad(ptr == PAGE_HEADER + PAGE_DIRECTION_B + block->frame);
- ut_ad(dir == PAGE_RIGHT || dir == PAGE_LEFT);
- block->page.zip.data[PAGE_HEADER + PAGE_DIRECTION_B]= *ptr= dir;
- mach_write_to_2(PAGE_HEADER + PAGE_N_DIRECTION + block->frame,
- 1U + page_header_get_field(block->frame, PAGE_N_DIRECTION));
- memcpy_aligned<2>(PAGE_HEADER + PAGE_N_DIRECTION + block->frame,
- PAGE_HEADER + PAGE_N_DIRECTION + block->page.zip.data, 2);
-}
-
-/***********************************************************//**
-Inserts a record next to page cursor on a compressed and uncompressed
-page. Returns pointer to inserted record if succeed, i.e.,
-enough space available, NULL otherwise.
-The cursor stays at the same position.
-
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if this is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit().
-
-@return pointer to record if succeed, NULL otherwise */
-static rec_t*
-page_cur_parse_insert_rec_zip(
- page_cur_t* cursor, /*!< in/out: page cursor */
- dict_index_t* index, /*!< in: record descriptor */
- const rec_t* rec, /*!< in: pointer to a physical record */
- offset_t* offsets,/*!< in/out: rec_get_offsets(rec, index) */
- mtr_t* mtr) /*!< in/out: mini-transaction */
-{
- byte* insert_buf;
- ulint rec_size;
- page_t* page; /*!< the relevant page */
- rec_t* insert_rec; /*!< inserted record */
- ulint heap_no; /*!< heap number of the inserted
- record */
- page_zip_des_t* page_zip;
-
- ut_ad(!log_sys.is_physical());
-
- page_zip = page_cur_get_page_zip(cursor);
- ut_ad(page_zip);
- ut_ad(rec_offs_validate(rec, index, offsets));
-
- page = page_cur_get_page(cursor);
- ut_ad(dict_table_is_comp(index->table));
- ut_ad(page_is_comp(page));
- ut_ad(fil_page_index_page_check(page));
- ut_ad(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID) == index->id
- || index->is_dummy
- || mtr->is_inside_ibuf());
- ut_ad(!page_get_instant(page));
- ut_ad(!page_cur_is_after_last(cursor));
-#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page, index));
-#endif /* UNIV_ZIP_DEBUG */
-
- /* 1. Get the size of the physical record in the page */
- rec_size = rec_offs_size(offsets);
-
-#ifdef UNIV_DEBUG_VALGRIND
- {
- const void* rec_start
- = rec - rec_offs_extra_size(offsets);
- ulint extra_size
- = rec_offs_extra_size(offsets)
- - (rec_offs_comp(offsets)
- ? REC_N_NEW_EXTRA_BYTES
- : REC_N_OLD_EXTRA_BYTES);
-
- /* All data bytes of the record must be valid. */
- UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
- /* The variable-length header must be valid. */
- UNIV_MEM_ASSERT_RW(rec_start, extra_size);
- }
-#endif /* UNIV_DEBUG_VALGRIND */
-
- const bool reorg_before_insert = page_has_garbage(page)
- && rec_size > page_get_max_insert_size(page, 1)
- && rec_size <= page_get_max_insert_size_after_reorganize(
- page, 1);
- constexpr uint16_t page_free_f = PAGE_FREE + PAGE_HEADER;
- byte* const page_free = my_assume_aligned<4>(page_free_f + page);
- uint16_t free_rec;
-
- /* 2. Try to find suitable space from page memory management */
- if (!page_zip_available(page_zip, dict_index_is_clust(index),
- rec_size, 1)
- || reorg_before_insert) {
- /* The values can change dynamically. */
- ulint level = page_zip_level;
-#ifdef UNIV_DEBUG
- rec_t* cursor_rec = page_cur_get_rec(cursor);
-#endif /* UNIV_DEBUG */
-
- /* If we are not writing compressed page images, we
- must reorganize the page before attempting the
- insert. */
- if (recv_recovery_is_on() && !log_sys.is_physical()) {
- /* Insert into the uncompressed page only.
- The page reorganization or creation that we
- would attempt outside crash recovery would
- have been covered by a previous redo log record. */
- } else if (page_is_empty(page)) {
- ut_ad(page_cur_is_before_first(cursor));
-
- /* This is an empty page. Recreate it to
- get rid of the modification log. */
- page_create_zip(page_cur_get_block(cursor), index,
- page_header_get_field(page, PAGE_LEVEL),
- 0, mtr);
- ut_ad(!page_header_get_ptr(page, PAGE_FREE));
-
- if (page_zip_available(
- page_zip, dict_index_is_clust(index),
- rec_size, 1)) {
- free_rec = 0;
- goto use_heap;
- }
-
- /* The cursor should remain on the page infimum. */
- return(NULL);
- } else if (!page_zip->m_nonempty && !page_has_garbage(page)) {
- /* The page has been freshly compressed, so
- reorganizing it will not help. */
- } else {
- ulint pos = page_rec_get_n_recs_before(cursor->rec);
-
- if (!page_zip_reorganize(page_cur_get_block(cursor),
- index, level, mtr, true)) {
- ut_ad(cursor->rec == cursor_rec);
- return NULL;
- }
-
- if (pos) {
- cursor->rec = page_rec_get_nth(page, pos);
- } else {
- ut_ad(cursor->rec == page_get_infimum_rec(
- page));
- }
-
- ut_ad(!page_header_get_ptr(page, PAGE_FREE));
-
- if (page_zip_available(
- page_zip, dict_index_is_clust(index),
- rec_size, 1)) {
- /* After reorganizing, there is space
- available. */
- free_rec = 0;
- goto use_heap;
- }
- }
-
- /* Try compressing the whole page afterwards. */
- const mtr_log_t log_mode = mtr->set_log_mode(MTR_LOG_NONE);
- insert_rec = page_cur_insert_rec_low(
- cursor, index, rec, offsets, mtr);
- mtr->set_log_mode(log_mode);
-
- /* If recovery is on, this implies that the compression
- of the page was successful during runtime. Had that not
- been the case or had the redo logging of compressed
- pages been enabled during runtime then we'd have seen
- a MLOG_ZIP_PAGE_COMPRESS redo record. Therefore, we
- know that we don't need to reorganize the page. We,
- however, do need to recompress the page. That will
- happen when the next redo record is read which must
- be of type MLOG_ZIP_PAGE_COMPRESS_NO_DATA and it must
- contain a valid compression level value.
- This implies that during recovery from this point till
- the next redo is applied the uncompressed and
- compressed versions are not identical and
- page_zip_validate will fail but that is OK because
- we call page_zip_validate only after processing
- all changes to a page under a single mtr during
- recovery. */
- if (insert_rec == NULL) {
- /* Out of space.
- This should never occur during crash recovery,
- because the MLOG_COMP_REC_INSERT should only
- be logged after a successful operation. */
- ut_ad(!recv_recovery_is_on());
- ut_ad(!index->is_dummy);
- } else if (recv_recovery_is_on() && !log_sys.is_physical()) {
- /* This should be followed by
- MLOG_ZIP_PAGE_COMPRESS_NO_DATA,
- which should succeed. */
- rec_offs_make_valid(insert_rec, index,
- page_is_leaf(page), offsets);
- } else {
- ulint pos = page_rec_get_n_recs_before(insert_rec);
- ut_ad(pos > 0);
-
- /* We are writing entire page images to the
- log. Reduce the redo log volume by
- reorganizing the page at the same time. */
- if (page_zip_reorganize(cursor->block, index,
- level, mtr)) {
- /* The page was reorganized: Seek to pos. */
- if (pos > 1) {
- cursor->rec = page_rec_get_nth(
- page, pos - 1);
- } else {
- cursor->rec = page + PAGE_NEW_INFIMUM;
- }
-
- insert_rec = page + rec_get_next_offs(
- cursor->rec, TRUE);
- rec_offs_make_valid(
- insert_rec, index,
- page_is_leaf(page), offsets);
- return insert_rec;
- }
-
- /* Theoretically, we could try one last resort
- of btr_page_reorganize_low() followed by
- page_zip_available(), but that would be very
- unlikely to succeed. (If the full reorganized
- page failed to compress, why would it succeed
- to compress the page, plus log the insert of
- this record?) */
-
- /* Out of space: restore the page */
- if (!page_zip_decompress(page_zip, page, FALSE)) {
- ut_error; /* Memory corrupted? */
- }
- ut_ad(page_validate(page, index));
- insert_rec = NULL;
- }
-
- return(insert_rec);
- }
-
- free_rec = mach_read_from_2(page_free);
- if (free_rec) {
- /* Try to allocate from the head of the free list. */
- lint extra_size_diff;
- offset_t foffsets_[REC_OFFS_NORMAL_SIZE];
- offset_t* foffsets = foffsets_;
- mem_heap_t* heap = NULL;
-
- rec_offs_init(foffsets_);
-
- foffsets = rec_get_offsets(page + free_rec, index, foffsets,
- page_is_leaf(page),
- ULINT_UNDEFINED, &heap);
- if (rec_offs_size(foffsets) < rec_size) {
-too_small:
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
-
- free_rec = 0;
- goto use_heap;
- }
-
- insert_buf = page + free_rec - rec_offs_extra_size(foffsets);
-
- /* On compressed pages, do not relocate records from
- the free list. If extra_size would grow, use the heap. */
- extra_size_diff = lint(rec_offs_extra_size(offsets)
- - rec_offs_extra_size(foffsets));
-
- if (UNIV_UNLIKELY(extra_size_diff < 0)) {
- /* Add an offset to the extra_size. */
- if (rec_offs_size(foffsets)
- < rec_size - ulint(extra_size_diff)) {
-
- goto too_small;
- }
-
- insert_buf -= extra_size_diff;
- } else if (UNIV_UNLIKELY(extra_size_diff)) {
- /* Do not allow extra_size to grow */
-
- goto too_small;
- }
-
- heap_no = rec_get_heap_no_new(page + free_rec);
- int16_t next_rec = mach_read_from_2(page + free_rec - REC_NEXT);
- /* We assume that int16_t is safe to use here.
- With innodb_page_size=64k it would be unsafe,
- but that cannot be used with ROW_FORMAT=COMPRESSED. */
- static_assert(UNIV_ZIP_SIZE_SHIFT_MAX == 14, "compatibility");
- if (next_rec) {
- next_rec += free_rec;
- ut_ad(int{PAGE_NEW_SUPREMUM_END + REC_N_NEW_EXTRA_BYTES}
- <= next_rec);
- ut_ad(static_cast<uint16_t>(next_rec) < srv_page_size);
- }
- mtr->write<2>(*cursor->block, page_free,
- static_cast<uint16_t>(next_rec));
- byte* garbage = my_assume_aligned<2>(page_free + 2);
- ut_ad(mach_read_from_2(garbage) >= rec_size);
- mtr->write<2>(*cursor->block, garbage,
- mach_read_from_2(garbage) - rec_size);
- compile_time_assert(PAGE_GARBAGE == PAGE_FREE + 2);
- compile_time_assert(!((PAGE_HEADER + PAGE_FREE) % 4));
- memcpy_aligned<4>(&page_zip->data[page_free_f], page_free, 4);
- /* TODO: group with PAGE_LAST_INSERT */
-
- if (!page_is_leaf(page)) {
- /* Zero out the node pointer of free_rec,
- in case it will not be overwritten by
- insert_rec. */
-
- ut_ad(rec_size > REC_NODE_PTR_SIZE);
-
- if (rec_offs_extra_size(foffsets)
- + rec_offs_data_size(foffsets) > rec_size) {
-
- memset(rec_get_end(page + free_rec, foffsets)
- - REC_NODE_PTR_SIZE, 0,
- REC_NODE_PTR_SIZE);
- }
- } else if (dict_index_is_clust(index)) {
- /* Zero out the DB_TRX_ID and DB_ROLL_PTR
- columns of free_rec, in case it will not be
- overwritten by insert_rec. */
-
- ulint trx_id_offs;
- ulint len;
-
- trx_id_offs = rec_get_nth_field_offs(
- foffsets, index->db_trx_id(), &len);
- ut_ad(len == DATA_TRX_ID_LEN);
-
- if (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN + trx_id_offs
- + rec_offs_extra_size(foffsets) > rec_size) {
- /* We will have to zero out the
- DB_TRX_ID and DB_ROLL_PTR, because
- they will not be fully overwritten by
- insert_rec. */
-
- memset(page + free_rec + trx_id_offs, 0,
- DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
- }
-
- ut_ad(free_rec + trx_id_offs + DATA_TRX_ID_LEN
- == rec_get_nth_field(free_rec, foffsets,
- index->db_roll_ptr(), &len));
- ut_ad(len == DATA_ROLL_PTR_LEN);
- }
-
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
- } else {
-use_heap:
- ut_ad(!free_rec);
- insert_buf = page_mem_alloc_heap<true>(cursor->block, rec_size,
- &heap_no, mtr);
-
- if (UNIV_UNLIKELY(insert_buf == NULL)) {
- return(NULL);
- }
-
- page_zip_dir_add_slot(cursor->block, index, mtr);
- }
-
- /* 3. Create the record */
- insert_rec = rec_copy(insert_buf, rec, offsets);
- rec_offs_make_valid(insert_rec, index, page_is_leaf(page), offsets);
-
- /* 4. Insert the record in the linked list of records */
- ut_ad(cursor->rec != insert_rec);
-
- /* next record after current before the insertion */
- const rec_t* next_rec = page_rec_get_next_low(cursor->rec, TRUE);
- ut_ad(rec_get_status(cursor->rec) <= REC_STATUS_INFIMUM);
- ut_ad(rec_get_status(insert_rec) < REC_STATUS_INFIMUM);
- ut_ad(rec_get_status(next_rec) != REC_STATUS_INFIMUM);
-
- mach_write_to_2(insert_rec - REC_NEXT, static_cast<uint16_t>
- (next_rec - insert_rec));
- mach_write_to_2(cursor->rec - REC_NEXT, static_cast<uint16_t>
- (insert_rec - cursor->rec));
- byte* n_recs = my_assume_aligned<2>(PAGE_N_RECS + PAGE_HEADER + page);
- mtr->write<2>(*cursor->block, n_recs, 1U + mach_read_from_2(n_recs));
- memcpy_aligned<2>(&page_zip->data[PAGE_N_RECS + PAGE_HEADER], n_recs,
- 2);
-
- /* 5. Set the n_owned field in the inserted record to zero,
- and set the heap_no field */
- rec_set_bit_field_1(insert_rec, 0, REC_NEW_N_OWNED,
- REC_N_OWNED_MASK, REC_N_OWNED_SHIFT);
- rec_set_bit_field_2(insert_rec, heap_no, REC_NEW_HEAP_NO,
- REC_HEAP_NO_MASK, REC_HEAP_NO_SHIFT);
-
- UNIV_MEM_ASSERT_RW(rec_get_start(insert_rec, offsets),
- rec_offs_size(offsets));
-
- page_zip_dir_insert(cursor, free_rec, insert_rec, mtr);
-
- /* 6. Update the last insertion info in page header */
- byte* last_insert = my_assume_aligned<4>(PAGE_LAST_INSERT + PAGE_HEADER
- + page);
- const uint16_t last_insert_rec = mach_read_from_2(last_insert);
- ut_ad(!last_insert_rec
- || rec_get_node_ptr_flag(page + last_insert_rec)
- == rec_get_node_ptr_flag(insert_rec));
- /* FIXME: combine with PAGE_DIRECTION changes */
- mtr->write<2>(*cursor->block, last_insert, page_offset(insert_rec));
- memcpy_aligned<4>(&page_zip->data[PAGE_LAST_INSERT + PAGE_HEADER],
- last_insert, 2);
-
- if (!index->is_spatial()) {
- byte* ptr = PAGE_HEADER + PAGE_DIRECTION_B + page;
- if (UNIV_UNLIKELY(!last_insert_rec)) {
-no_direction:
- page_zip->data[PAGE_HEADER + PAGE_DIRECTION_B] = *ptr
- = PAGE_NO_DIRECTION;
- memset_aligned<2>(PAGE_HEADER + PAGE_N_DIRECTION + page,
- 0, 2);
- memset_aligned<2>(PAGE_HEADER + PAGE_N_DIRECTION
- + page_zip->data, 0, 2);
- } else if (page + last_insert_rec == cursor->rec
- && page_ptr_get_direction(ptr) != PAGE_LEFT) {
- page_direction_increment(cursor->block, ptr,
- PAGE_RIGHT);
- } else if (page_ptr_get_direction(ptr) != PAGE_RIGHT
- && page_rec_get_next(insert_rec)
- == page + last_insert_rec) {
- page_direction_increment(cursor->block, ptr,
- PAGE_LEFT);
- } else {
- goto no_direction;
- }
- }
-
- /* 7. It remains to update the owner record. */
- ulint n_owned;
-
- while (!(n_owned = rec_get_n_owned_new(next_rec))) {
- next_rec = page_rec_get_next_low(next_rec, true);
- }
-
- rec_set_bit_field_1(const_cast<rec_t*>(next_rec), n_owned + 1,
- REC_NEW_N_OWNED,
- REC_N_OWNED_MASK, REC_N_OWNED_SHIFT);
-
- /* 8. Now we have incremented the n_owned field of the owner
- record. If the number exceeds PAGE_DIR_SLOT_MAX_N_OWNED,
- we have to split the corresponding directory slot in two. */
- if (UNIV_UNLIKELY(n_owned == PAGE_DIR_SLOT_MAX_N_OWNED)) {
- page_dir_split_slot<true>(page_cur_get_block(cursor),
- page_dir_find_owner_slot(next_rec),
- mtr);
- }
-
- page_zip_write_rec(cursor->block, insert_rec, index, offsets, 1, mtr);
- return insert_rec;
-}
-
-/**********************************************************//**
-Parses a log record of copying a record list end to a new created page.
-@return end of log record or NULL */
-ATTRIBUTE_COLD /* only used when crash-upgrading */
-const byte*
-page_parse_copy_rec_list_to_created_page(
-/*=====================================*/
- const byte* ptr, /*!< in: buffer */
- const byte* end_ptr,/*!< in: buffer end */
- buf_block_t* block, /*!< in: page or NULL */
- dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr) /*!< in: mtr or NULL */
-{
- ulint log_data_len;
-
- ut_ad(index->is_dummy);
-
- if (ptr + 4 > end_ptr) {
-
- return(NULL);
- }
-
- log_data_len = mach_read_from_4(ptr);
- ptr += 4;
-
- const byte* rec_end = ptr + log_data_len;
-
- if (rec_end > end_ptr) {
-
- return(NULL);
- }
-
- if (!block) {
-
- return(rec_end);
- }
-
- ut_ad(fil_page_index_page_check(block->frame));
- /* This function is never invoked on the clustered index root page,
- except in the redo log apply of
- page_copy_rec_list_end_to_created_page().
- For other pages, this field must be zero-initialized. */
- ut_ad(!page_get_instant(block->frame)
- || !page_has_siblings(block->frame));
-
- while (ptr < rec_end) {
- ptr = page_cur_parse_insert_rec(true, ptr, end_ptr,
- block, index, mtr);
- }
-
- ut_a(ptr == rec_end);
-
- memset_aligned<2>(PAGE_HEADER + PAGE_LAST_INSERT + block->frame, 0, 2);
- if (block->page.zip.data) {
- memset_aligned<2>(PAGE_HEADER + PAGE_LAST_INSERT
- + block->page.zip.data, 0, 2);
- }
-
- if (index->is_spatial()) {
- return rec_end;
- }
-
- block->frame[PAGE_HEADER + PAGE_DIRECTION_B] &= ~((1U << 3) - 1);
- block->frame[PAGE_HEADER + PAGE_DIRECTION_B] |= PAGE_NO_DIRECTION;
- if (block->page.zip.data) {
- block->page.zip.data[PAGE_HEADER + PAGE_DIRECTION_B]
- = PAGE_NO_DIRECTION;
- }
-
- return(rec_end);
-}
-
-/*************************************************************//**
-Copies records from page to a newly created page, from a given record onward,
-including that record. Infimum and supremum records are not copied.
-
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if this is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit(). */
-ATTRIBUTE_COLD /* only used when crash-upgrading */
-void
-page_copy_rec_list_end_to_created_page(
-/*===================================*/
- buf_block_t* block, /*!< in/out: index page to copy to */
- rec_t* rec, /*!< in: first record to copy */
- dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr) /*!< in: mtr */
-{
- page_dir_slot_t* slot = 0; /* remove warning */
- page_t* new_page = block->frame;
- byte* heap_top;
- rec_t* insert_rec = 0; /* remove warning */
- rec_t* prev_rec;
- ulint count;
- ulint n_recs;
- ulint slot_index;
- ulint rec_size;
- mem_heap_t* heap = NULL;
- offset_t offsets_[REC_OFFS_NORMAL_SIZE];
- offset_t* offsets = offsets_;
- rec_offs_init(offsets_);
-
- /* The record was never emitted for ROW_FORMAT=COMPRESSED pages. */
- ut_ad(!block->page.zip.data);
- ut_ad(page_dir_get_n_heap(new_page) == PAGE_HEAP_NO_USER_LOW);
- ut_ad(page_align(rec) != new_page);
- ut_ad(page_rec_is_comp(rec) == page_is_comp(new_page));
- ut_ad(fil_page_index_page_check(new_page));
- /* This function is never invoked on the clustered index root page,
- except in btr_lift_page_up(). */
- ut_ad(!page_get_instant(new_page) || !page_has_siblings(new_page));
-
- if (page_rec_is_infimum(rec)) {
-
- rec = page_rec_get_next(rec);
- }
-
- if (page_rec_is_supremum(rec)) {
-
- return;
- }
-
-#ifdef UNIV_DEBUG
- /* To pass the debug tests we have to set these dummy values
- in the debug version */
- mach_write_to_2(PAGE_HEADER + PAGE_N_DIR_SLOTS + new_page,
- srv_page_size / 2);
- mach_write_to_2(PAGE_HEADER + PAGE_HEAP_TOP + new_page,
- srv_page_size - 1);
-#endif
- prev_rec = page_get_infimum_rec(new_page);
- if (page_is_comp(new_page)) {
- heap_top = new_page + PAGE_NEW_SUPREMUM_END;
- } else {
- heap_top = new_page + PAGE_OLD_SUPREMUM_END;
- }
- count = 0;
- slot_index = 0;
- n_recs = 0;
-
- const bool is_leaf = page_is_leaf(new_page);
-
- do {
- offsets = rec_get_offsets(rec, index, offsets, is_leaf,
- ULINT_UNDEFINED, &heap);
- insert_rec = rec_copy(heap_top, rec, offsets);
-
- const bool comp = page_is_comp(new_page) != 0;
-
- if (comp) {
- rec_set_next_offs_new(prev_rec,
- page_offset(insert_rec));
- } else {
- rec_set_next_offs_old(prev_rec,
- page_offset(insert_rec));
- }
-
- page_rec_set_n_owned<false>(block, insert_rec, 0, comp, mtr);
-
- rec_set_heap_no(insert_rec, PAGE_HEAP_NO_USER_LOW + n_recs,
- page_is_comp(new_page));
-
- count++;
- n_recs++;
-
- if (UNIV_UNLIKELY
- (count == (PAGE_DIR_SLOT_MAX_N_OWNED + 1) / 2)) {
-
- slot_index++;
-
- slot = page_dir_get_nth_slot(new_page, slot_index);
- mach_write_to_2(slot, page_offset(insert_rec));
- page_dir_slot_set_n_owned<false>(block, slot, count,
- mtr);
-
- count = 0;
- }
-
- rec_size = rec_offs_size(offsets);
-
- ut_ad(heap_top < new_page + srv_page_size);
-
- heap_top += rec_size;
-
- rec_offs_make_valid(insert_rec, index, is_leaf, offsets);
- prev_rec = insert_rec;
- rec = page_rec_get_next(rec);
- } while (!page_rec_is_supremum(rec));
-
- ut_ad(n_recs);
-
- if ((slot_index > 0) && (count + 1
- + (PAGE_DIR_SLOT_MAX_N_OWNED + 1) / 2
- <= PAGE_DIR_SLOT_MAX_N_OWNED)) {
- /* We can merge the two last dir slots. This operation is
- here to make this function imitate exactly the equivalent
- task made using page_cur_insert_rec, which we use in database
- recovery to reproduce the task performed by this function.
- To be able to check the correctness of recovery, it is good
- that it imitates exactly. */
-
- count += (PAGE_DIR_SLOT_MAX_N_OWNED + 1) / 2;
-
- page_dir_slot_set_n_owned<false>(block, slot, 0, mtr);
-
- slot_index--;
- }
-
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
-
- slot = page_dir_get_nth_slot(new_page, 1 + slot_index);
-
- if (page_is_comp(new_page)) {
- rec_set_next_offs_new(insert_rec, PAGE_NEW_SUPREMUM);
- mach_write_to_2(slot, PAGE_NEW_SUPREMUM);
- rec_set_bit_field_1(new_page + PAGE_NEW_SUPREMUM, count + 1,
- REC_NEW_N_OWNED,
- REC_N_OWNED_MASK, REC_N_OWNED_SHIFT);
- } else {
- rec_set_next_offs_old(insert_rec, PAGE_OLD_SUPREMUM);
- mach_write_to_2(slot, PAGE_OLD_SUPREMUM);
- rec_set_bit_field_1(new_page + PAGE_OLD_SUPREMUM, count + 1,
- REC_OLD_N_OWNED,
- REC_N_OWNED_MASK, REC_N_OWNED_SHIFT);
- }
-
- mach_write_to_2(PAGE_HEADER + PAGE_N_DIR_SLOTS + new_page,
- 2 + slot_index);
- mach_write_to_2(PAGE_HEADER + PAGE_HEAP_TOP + new_page,
- page_offset(heap_top));
- mach_write_to_2(PAGE_HEADER + PAGE_N_HEAP + new_page,
- PAGE_HEAP_NO_USER_LOW + n_recs);
- mach_write_to_2(PAGE_HEADER + PAGE_N_RECS + new_page, n_recs);
-
- memset_aligned<2>(PAGE_HEADER + PAGE_LAST_INSERT + new_page, 0, 2);
- mach_write_to_1(PAGE_HEADER + PAGE_DIRECTION_B + new_page,
- (mach_read_from_1(PAGE_HEADER + PAGE_DIRECTION_B
- + new_page) & ~((1U << 3) - 1))
- | PAGE_NO_DIRECTION);
- memset_aligned<2>(PAGE_HEADER + PAGE_N_DIRECTION + new_page, 0, 2);
-}
-
-/***********************************************************//**
-Parses log record of a record delete on a page.
-@return pointer to record end or NULL */
-ATTRIBUTE_COLD /* only used when crash-upgrading */
-const byte*
-page_cur_parse_delete_rec(
-/*======================*/
- const byte* ptr, /*!< in: buffer */
- const byte* end_ptr,/*!< in: buffer end */
- buf_block_t* block, /*!< in: page or NULL */
- dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr) /*!< in/out: mini-transaction,
- or NULL if block=NULL */
-{
- ulint offset;
- page_cur_t cursor;
-
- ut_ad(!block == !mtr);
-
- if (end_ptr < ptr + 2) {
-
- return(NULL);
- }
-
- /* Read the cursor rec offset as a 2-byte ulint */
- offset = mach_read_from_2(ptr);
- ptr += 2;
-
- if (UNIV_UNLIKELY(offset >= srv_page_size)) {
- recv_sys.found_corrupt_log = true;
- return NULL;
- }
-
- if (block) {
- page_t* page = buf_block_get_frame(block);
- mem_heap_t* heap = NULL;
- offset_t offsets_[REC_OFFS_NORMAL_SIZE];
- rec_t* rec = page + offset;
- rec_offs_init(offsets_);
-
- page_cur_position(rec, block, &cursor);
- ut_ad(!buf_block_get_page_zip(block) || page_is_comp(page));
-
- page_cur_delete_rec(&cursor, index,
- rec_get_offsets(rec, index, offsets_,
- page_rec_is_leaf(rec),
- ULINT_UNDEFINED, &heap),
- mtr);
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
- }
-
- return(ptr);
-}
-
/** Prepend a record to the PAGE_FREE list.
@param[in,out] block index page
@param[in,out] rec record being deleted
@@ -2811,7 +1839,6 @@ page_cur_delete_rec(
ut_ad(fil_page_index_page_check(block->frame));
ut_ad(mach_read_from_8(PAGE_HEADER + PAGE_INDEX_ID + block->frame)
== index->id
- || index->is_dummy
|| mtr->is_inside_ibuf());
ut_ad(mtr->is_named_space(index->table->space));
@@ -2819,11 +1846,6 @@ page_cur_delete_rec(
ut_ad(page_rec_is_user_rec(current_rec));
if (page_get_n_recs(block->frame) == 1
- /* Empty the page, unless we are applying the redo log
- during crash recovery. During normal operation, the
- page_create_empty() gets logged as one of MLOG_PAGE_CREATE,
- MLOG_COMP_PAGE_CREATE, MLOG_ZIP_PAGE_COMPRESS. */
- && !recv_recovery_is_on() && !log_sys.is_physical()
&& !rec_is_alter_metadata(current_rec, *index)) {
/* Empty the page. */
ut_ad(page_is_leaf(block->frame));
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index 7b7479906cf..66b4aa55f1a 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -826,62 +826,6 @@ zip_reorganize:
return(ret);
}
-/**********************************************************//**
-Parses a log record of a record list end or start deletion.
-@return end of log record or NULL */
-ATTRIBUTE_COLD /* only used when crash-upgrading */
-const byte*
-page_parse_delete_rec_list(
-/*=======================*/
- mlog_id_t type, /*!< in: MLOG_LIST_END_DELETE,
- MLOG_LIST_START_DELETE,
- MLOG_COMP_LIST_END_DELETE or
- MLOG_COMP_LIST_START_DELETE */
- const byte* ptr, /*!< in: buffer */
- const byte* end_ptr,/*!< in: buffer end */
- buf_block_t* block, /*!< in/out: buffer block or NULL */
- dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr) /*!< in: mtr or NULL */
-{
- page_t* page;
- ulint offset;
-
- ut_ad(type == MLOG_LIST_END_DELETE
- || type == MLOG_LIST_START_DELETE
- || type == MLOG_COMP_LIST_END_DELETE
- || type == MLOG_COMP_LIST_START_DELETE);
-
- /* Read the record offset as a 2-byte ulint */
-
- if (end_ptr < ptr + 2) {
-
- return(NULL);
- }
-
- offset = mach_read_from_2(ptr);
- ptr += 2;
-
- if (!block) {
-
- return(ptr);
- }
-
- page = buf_block_get_frame(block);
-
- ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
-
- if (type == MLOG_LIST_END_DELETE
- || type == MLOG_COMP_LIST_END_DELETE) {
- page_delete_rec_list_end(page + offset, block, index,
- ULINT_UNDEFINED, ULINT_UNDEFINED,
- mtr);
- } else {
- page_delete_rec_list_start(page + offset, block, index, mtr);
- }
-
- return(ptr);
-}
-
/*************************************************************//**
Deletes records from a page from a given record onward, including that record.
The infimum and supremum records are not deleted. */
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index c6739f067f4..fa72090d651 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -3847,67 +3847,6 @@ void page_zip_write_rec(buf_block_t *block, const byte *rec,
#endif /* UNIV_ZIP_DEBUG */
}
-/***********************************************************//**
-Parses a log record of writing a BLOB pointer of a record.
-@return end of log record or NULL */
-ATTRIBUTE_COLD /* only used when crash-upgrading */
-const byte*
-page_zip_parse_write_blob_ptr(
-/*==========================*/
- const byte* ptr, /*!< in: redo log buffer */
- const byte* end_ptr,/*!< in: redo log buffer end */
- page_t* page, /*!< in/out: uncompressed page */
- page_zip_des_t* page_zip)/*!< in/out: compressed page */
-{
- ulint offset;
- ulint z_offset;
-
- ut_ad(ptr != NULL);
- ut_ad(end_ptr != NULL);
- ut_ad(!page == !page_zip);
-
- if (UNIV_UNLIKELY
- (end_ptr < ptr + (2 + 2 + BTR_EXTERN_FIELD_REF_SIZE))) {
-
- return(NULL);
- }
-
- offset = mach_read_from_2(ptr);
- z_offset = mach_read_from_2(ptr + 2);
-
- if (offset < PAGE_ZIP_START
- || offset >= srv_page_size
- || z_offset >= srv_page_size) {
-corrupt:
- recv_sys.found_corrupt_log = TRUE;
-
- return(NULL);
- }
-
- if (page) {
-
- if (!page_zip || !page_is_leaf(page)) {
-
- goto corrupt;
- }
-
-#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page, NULL));
-#endif /* UNIV_ZIP_DEBUG */
-
- memcpy(page + offset,
- ptr + 4, BTR_EXTERN_FIELD_REF_SIZE);
- memcpy(page_zip->data + z_offset,
- ptr + 4, BTR_EXTERN_FIELD_REF_SIZE);
-
-#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page, NULL));
-#endif /* UNIV_ZIP_DEBUG */
- }
-
- return(ptr + (2 + 2 + BTR_EXTERN_FIELD_REF_SIZE));
-}
-
/**********************************************************************//**
Write a BLOB pointer of a record on the leaf page of a clustered index.
The information must already have been updated on the uncompressed page. */
@@ -3973,82 +3912,6 @@ page_zip_write_blob_ptr(
#endif /* UNIV_ZIP_DEBUG */
}
-/***********************************************************//**
-Parses a log record of writing the node pointer of a record.
-@return end of log record or NULL */
-ATTRIBUTE_COLD /* only used when crash-upgrading */
-const byte*
-page_zip_parse_write_node_ptr(
-/*==========================*/
- const byte* ptr, /*!< in: redo log buffer */
- const byte* end_ptr,/*!< in: redo log buffer end */
- page_t* page, /*!< in/out: uncompressed page */
- page_zip_des_t* page_zip)/*!< in/out: compressed page */
-{
- ulint offset;
- ulint z_offset;
-
- ut_ad(ptr != NULL);
- ut_ad(end_ptr!= NULL);
- ut_ad(!page == !page_zip);
-
- if (UNIV_UNLIKELY(end_ptr < ptr + (2 + 2 + REC_NODE_PTR_SIZE))) {
-
- return(NULL);
- }
-
- offset = mach_read_from_2(ptr);
- z_offset = mach_read_from_2(ptr + 2);
-
- if (offset < PAGE_ZIP_START
- || offset >= srv_page_size
- || z_offset >= srv_page_size) {
-corrupt:
- recv_sys.found_corrupt_log = TRUE;
-
- return(NULL);
- }
-
- if (page) {
- byte* storage_end;
- byte* field;
- byte* storage;
- ulint heap_no;
-
- if (!page_zip || page_is_leaf(page)) {
-
- goto corrupt;
- }
-
-#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page, NULL));
-#endif /* UNIV_ZIP_DEBUG */
-
- field = page + offset;
- storage = page_zip->data + z_offset;
-
- storage_end = page_zip_dir_start(page_zip);
-
- heap_no = 1 + ulint(storage_end - storage) / REC_NODE_PTR_SIZE;
-
- if (UNIV_UNLIKELY((storage_end - storage) % REC_NODE_PTR_SIZE)
- || UNIV_UNLIKELY(heap_no < PAGE_HEAP_NO_USER_LOW)
- || UNIV_UNLIKELY(heap_no >= page_dir_get_n_heap(page))) {
-
- goto corrupt;
- }
-
- memcpy(field, ptr + 4, REC_NODE_PTR_SIZE);
- memcpy(storage, ptr + 4, REC_NODE_PTR_SIZE);
-
-#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page, NULL));
-#endif /* UNIV_ZIP_DEBUG */
- }
-
- return(ptr + (2 + 2 + REC_NODE_PTR_SIZE));
-}
-
/**********************************************************************//**
Write the node pointer of a record on a non-leaf compressed page. */
void
@@ -4194,67 +4057,6 @@ write:
UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
}
-/** Parse a MLOG_ZIP_WRITE_TRX_ID record.
-@param[in] ptr redo log buffer
-@param[in] end_ptr end of redo log buffer
-@param[in,out] page uncompressed page
-@param[in,out] page_zip compressed page
-@return end of log record
-@retval NULL if the log record is incomplete */
-ATTRIBUTE_COLD /* only used when crash-upgrading */
-const byte*
-page_zip_parse_write_trx_id(
- const byte* ptr,
- const byte* end_ptr,
- page_t* page,
- page_zip_des_t* page_zip)
-{
- const byte* const end = 2 + 2 + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN
- + ptr;
-
- if (UNIV_UNLIKELY(end_ptr < end)) {
- return(NULL);
- }
-
- uint offset = mach_read_from_2(ptr);
- uint z_offset = mach_read_from_2(ptr + 2);
-
- if (offset < PAGE_ZIP_START
- || offset >= srv_page_size
- || z_offset >= srv_page_size) {
-corrupt:
- recv_sys.found_corrupt_log = TRUE;
-
- return(NULL);
- }
-
- if (page) {
- if (!page_zip || !page_is_leaf(page)) {
- goto corrupt;
- }
-
-#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page, NULL));
-#endif /* UNIV_ZIP_DEBUG */
-
- byte* field = page + offset;
- byte* storage = page_zip->data + z_offset;
-
- if (storage >= page_zip_dir_start(page_zip)) {
- goto corrupt;
- }
-
- memcpy(field, ptr + 4, DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
- memcpy(storage, ptr + 4, DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
-
-#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page, NULL));
-#endif /* UNIV_ZIP_DEBUG */
- }
-
- return end;
-}
-
/**********************************************************************//**
Clear an area on the uncompressed and compressed page.
Do not clear the data payload, as that would grow the modification log. */
@@ -4589,69 +4391,10 @@ void page_zip_dir_delete(buf_block_t *block, byte *rec,
page_zip_clear_rec(block, rec, index, offsets, mtr);
}
-/***********************************************************//**
-Parses a log record of writing to the header of a page.
-@return end of log record or NULL */
-ATTRIBUTE_COLD /* only used when crash-upgrading */
-const byte*
-page_zip_parse_write_header(
-/*========================*/
- const byte* ptr, /*!< in: redo log buffer */
- const byte* end_ptr,/*!< in: redo log buffer end */
- page_t* page, /*!< in/out: uncompressed page */
- page_zip_des_t* page_zip)/*!< in/out: compressed page */
-{
- ulint offset;
- ulint len;
-
- ut_ad(ptr != NULL);
- ut_ad(end_ptr!= NULL);
- ut_ad(!page == !page_zip);
-
- if (UNIV_UNLIKELY(end_ptr < ptr + (1 + 1))) {
-
- return(NULL);
- }
-
- offset = (ulint) *ptr++;
- len = (ulint) *ptr++;
-
- if (len == 0 || offset + len >= PAGE_DATA) {
-corrupt:
- recv_sys.found_corrupt_log = TRUE;
-
- return(NULL);
- }
-
- if (end_ptr < ptr + len) {
-
- return(NULL);
- }
-
- if (page) {
- if (!page_zip) {
-
- goto corrupt;
- }
-#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page, NULL));
-#endif /* UNIV_ZIP_DEBUG */
-
- memcpy(page + offset, ptr, len);
- memcpy(page_zip->data + offset, ptr, len);
-
-#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page, NULL));
-#endif /* UNIV_ZIP_DEBUG */
- }
-
- return(ptr + len);
-}
-
/**********************************************************************//**
Reorganize and compress a page. This is a low-level operation for
compressed pages, to be used when page_zip_compress() fails.
-On success, a redo log entry MLOG_ZIP_PAGE_COMPRESS will be written.
+On success, redo log will be written.
The function btr_page_reorganize() should be preferred whenever possible.
IMPORTANT: if page_zip_reorganize() is invoked on a leaf page of a
non-clustered index, the caller must update the insert buffer free
@@ -4861,72 +4604,6 @@ page_zip_copy_recs(
#endif /* UNIV_ZIP_DEBUG */
page_zip_compress_write_log(block, index, mtr);
}
-
-/** Parse and optionally apply MLOG_ZIP_PAGE_COMPRESS.
-@param[in] ptr log record
-@param[in] end_ptr end of log
-@param[in,out] block ROW_FORMAT=COMPRESSED block, or NULL for parsing only
-@return end of log record
-@retval NULL if the log record is incomplete */
-ATTRIBUTE_COLD /* only used when crash-upgrading */
-const byte* page_zip_parse_compress(const byte* ptr, const byte* end_ptr,
- buf_block_t* block)
-{
- ulint size;
- ulint trailer_size;
-
- ut_ad(ptr != NULL);
- ut_ad(end_ptr!= NULL);
-
- if (UNIV_UNLIKELY(ptr + (2 + 2) > end_ptr)) {
-
- return(NULL);
- }
-
- size = mach_read_from_2(ptr);
- ptr += 2;
- trailer_size = mach_read_from_2(ptr);
- ptr += 2;
-
- if (UNIV_UNLIKELY(ptr + 8 + size + trailer_size > end_ptr)) {
-
- return(NULL);
- }
-
- if (block) {
- ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
- page_zip_des_t* page_zip = buf_block_get_page_zip(block);
- if (!page_zip || page_zip_get_size(page_zip) < size
- || block->page.id.page_no() < 3) {
-corrupt:
- recv_sys.found_corrupt_log = TRUE;
-
- return(NULL);
- }
-
- memset(page_zip->data, 0, page_zip_get_size(page_zip));
- mach_write_to_4(FIL_PAGE_OFFSET
- + page_zip->data, block->page.id.page_no());
- mach_write_to_4(FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID
- + page_zip->data, block->page.id.space());
- memcpy(page_zip->data + FIL_PAGE_PREV, ptr, 4);
- memcpy(page_zip->data + FIL_PAGE_NEXT, ptr + 4, 4);
- memcpy(page_zip->data + FIL_PAGE_TYPE, ptr + 8, size);
- memset(page_zip->data + FIL_PAGE_TYPE + size, 0,
- page_zip_get_size(page_zip) - trailer_size
- - (FIL_PAGE_TYPE + size));
- memcpy(page_zip->data + page_zip_get_size(page_zip)
- - trailer_size, ptr + 8 + size, trailer_size);
-
- if (UNIV_UNLIKELY(!page_zip_decompress(page_zip, block->frame,
- TRUE))) {
-
- goto corrupt;
- }
- }
-
- return(const_cast<byte*>(ptr) + 8 + size + trailer_size);
-}
#endif /* !UNIV_INNOCHECKSUM */
/** Calculate the compressed page checksum.