Diffstat (limited to 'storage/innobase/page/page0cur.cc')
-rw-r--r--  storage/innobase/page/page0cur.cc  326
1 file changed, 6 insertions(+), 320 deletions(-)
diff --git a/storage/innobase/page/page0cur.cc b/storage/innobase/page/page0cur.cc
index b0412009b80..87c28872462 100644
--- a/storage/innobase/page/page0cur.cc
+++ b/storage/innobase/page/page0cur.cc
@@ -301,96 +301,6 @@ page_cur_rec_field_extends(
}
#endif /* PAGE_CUR_LE_OR_EXTENDS */
-/** If key is fixed length then populate offset directly from
-cached version.
-@param[in] rec B-Tree record for which offset needs to be
- populated.
-@param[in,out] index index handler
-@param[in] tuple data tuple
-@param[in,out] offsets default offsets array
-@param[in,out] heap heap
-@return reference to populate offsets. */
-static
-ulint*
-populate_offsets(
- const rec_t* rec,
- const dtuple_t* tuple,
- dict_index_t* index,
- ulint* offsets,
- mem_heap_t** heap)
-{
- ut_ad(dict_table_is_intrinsic(index->table));
-
- bool rec_has_null_values = false;
-
- if (index->rec_cache.key_has_null_cols) {
- /* Check if record has null value. */
- const byte* nulls = rec - (1 + REC_N_NEW_EXTRA_BYTES);
- ulint n_bytes_to_scan
- = UT_BITS_IN_BYTES(index->n_nullable);
- byte null_mask = 0xff;
- ulint bits_examined = 0;
-
- for (ulint i = 0; i < n_bytes_to_scan - 1; i++) {
- if (*nulls & null_mask) {
- rec_has_null_values = true;
- break;
- }
- --nulls;
- bits_examined += 8;
- }
-
- if (!rec_has_null_values) {
- null_mask >>= (8 - (index->n_nullable - bits_examined));
- rec_has_null_values = *nulls & null_mask;
- }
-
- if (rec_has_null_values) {
-
- offsets = rec_get_offsets(
- rec, index, offsets,
- dtuple_get_n_fields_cmp(tuple), heap);
-
- return(offsets);
- }
- }
-
- /* Check if offsets are cached else cache them first.
- There are queries that will first verify if key is present using index
- search and then initiate insert. If offsets are cached during index
- search it would be based on key part only but during insert that looks
- out for exact location to insert key + db_row_id both columns would
- be used and so re-compute offsets in such case. */
- if (!index->rec_cache.offsets_cached
- || (rec_offs_n_fields(index->rec_cache.offsets)
- < dtuple_get_n_fields_cmp(tuple))) {
-
- offsets = rec_get_offsets(
- rec, index, offsets,
- dtuple_get_n_fields_cmp(tuple), heap);
-
- /* Reallocate if our offset array is not big
- enough to hold the needed size. */
- ulint sz1 = index->rec_cache.sz_of_offsets;
- ulint sz2 = offsets[0];
- if (sz1 < sz2) {
- index->rec_cache.offsets = static_cast<ulint*>(
- mem_heap_alloc(
- index->heap, sizeof(ulint) * sz2));
- index->rec_cache.sz_of_offsets =
- static_cast<uint32_t>(sz2);
- }
-
- memcpy(index->rec_cache.offsets,
- offsets, (sizeof(ulint) * sz2));
- index->rec_cache.offsets_cached = true;
- }
-
- ut_ad(index->rec_cache.offsets[2] = (ulint) rec);
-
- return(index->rec_cache.offsets);
-}
-
/****************************************************************//**
Searches the right position for a page cursor. */
void
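
For context, the fast path removed in the hunk above trusted the cached offsets only when the record contained no NULL values, which it checked by walking the null-flag bitmap that a ROW_FORMAT=COMPACT record stores just below its header. A minimal standalone sketch of that check follows; it is not part of the patch. The helper name is hypothetical, plain integer types stand in for the InnoDB typedefs, REC_N_NEW_EXTRA_BYTES is hard-coded to its value of 5, and at least one nullable column is assumed, as in the deleted code (it was only reached when key_has_null_cols was set).

#include <cstddef>

/* The COMPACT null bitmap grows downwards, one bit per nullable column,
starting in the byte just below the 5 fixed extra bytes of the header. */
static bool
compact_rec_has_null(const unsigned char* rec, size_t n_nullable)
{
	const unsigned char*	nulls = rec - (1 + 5 /* REC_N_NEW_EXTRA_BYTES */);
	const size_t		n_bytes = (n_nullable + 7) / 8; /* UT_BITS_IN_BYTES */
	size_t			bits_examined = 0;

	/* Every byte but the last maps fully to nullable columns. */
	for (size_t i = 0; i + 1 < n_bytes; i++) {
		if (*nulls != 0) {
			return(true);
		}
		--nulls;		/* the bitmap grows downwards */
		bits_examined += 8;
	}

	/* In the last byte only the remaining bits are meaningful. */
	const unsigned char	mask = 0xff >> (8 - (n_nullable - bits_examined));

	return((*nulls & mask) != 0);
}

If this returned true, the deleted populate_offsets() gave up on the cache and fell back to rec_get_offsets(), exactly what the simplified code now does unconditionally.
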
@@ -522,17 +432,9 @@ page_cur_search_with_match(
up_matched_fields);
offsets = offsets_;
- if (index->rec_cache.fixed_len_key) {
- offsets = populate_offsets(
- mid_rec, tuple,
- const_cast<dict_index_t*>(index),
- offsets, &heap);
- } else {
- offsets = rec_get_offsets(
- mid_rec, index, offsets,
- dtuple_get_n_fields_cmp(tuple), &heap);
-
- }
+ offsets = rec_get_offsets(
+ mid_rec, index, offsets,
+ dtuple_get_n_fields_cmp(tuple), &heap);
cmp = cmp_dtuple_rec_with_match(
tuple, mid_rec, offsets, &cur_matched_fields);
@@ -584,17 +486,9 @@ up_slot_match:
up_matched_fields);
offsets = offsets_;
- if (index->rec_cache.fixed_len_key) {
- offsets = populate_offsets(
- mid_rec, tuple,
- const_cast<dict_index_t*>(index),
- offsets, &heap);
- } else {
- offsets = rec_get_offsets(
- mid_rec, index, offsets,
- dtuple_get_n_fields_cmp(tuple), &heap);
-
- }
+ offsets = rec_get_offsets(
+ mid_rec, index, offsets,
+ dtuple_get_n_fields_cmp(tuple), &heap);
cmp = cmp_dtuple_rec_with_match(
tuple, mid_rec, offsets, &cur_matched_fields);
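
Both search call sites above now compute the offsets unconditionally with rec_get_offsets(). The surrounding code (and the free-list handling deleted further down) follows the usual stack-first pattern: a fixed array of REC_OFFS_NORMAL_SIZE slots, plus a heap that rec_get_offsets() allocates only when the record needs more. Below is a hedged sketch of that pattern, wrapped in a throwaway helper with a hypothetical name and assuming the InnoDB declarations are in scope.

/* Sketch only: the offsets lifecycle used by the simplified branch above. */
static void
offsets_pattern_sketch(
	const rec_t*		rec,
	const dict_index_t*	index,
	const dtuple_t*		tuple)
{
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets	= offsets_;
	mem_heap_t*	heap	= NULL;

	rec_offs_init(offsets_);

	/* Fills the stack array, or switches to heap storage if the
	record needs more slots than the array provides. */
	offsets = rec_get_offsets(
		rec, index, offsets,
		dtuple_get_n_fields_cmp(tuple), &heap);

	/* ... use offsets, e.g. with cmp_dtuple_rec_with_match() ... */

	if (heap != NULL) {
		mem_heap_free(heap);
	}
}
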
@@ -1554,214 +1448,6 @@ use_heap:
return(insert_rec);
}
-/** Inserts a record next to page cursor on an uncompressed page.
-@param[in] current_rec pointer to current record after which
- the new record is inserted.
-@param[in] index record descriptor
-@param[in] tuple pointer to a data tuple
-@param[in] n_ext number of externally stored columns
-@param[in] mtr mini-transaction handle, or NULL
-
-@return pointer to record if succeed, NULL otherwise */
-rec_t*
-page_cur_direct_insert_rec_low(
- rec_t* current_rec,
- dict_index_t* index,
- const dtuple_t* tuple,
- ulint n_ext,
- mtr_t* mtr)
-{
- byte* insert_buf;
- ulint rec_size;
- page_t* page; /*!< the relevant page */
- rec_t* last_insert; /*!< cursor position at previous
- insert */
- rec_t* free_rec; /*!< a free record that was reused,
- or NULL */
- rec_t* insert_rec; /*!< inserted record */
- ulint heap_no; /*!< heap number of the inserted
- record */
-
- page = page_align(current_rec);
-
- ut_ad(dict_table_is_comp(index->table)
- == (ibool) !!page_is_comp(page));
-
- ut_ad(fil_page_index_page_check(page));
-
- ut_ad(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID)
- == index->id);
-
- ut_ad(!page_rec_is_supremum(current_rec));
-
- /* 1. Get the size of the physical record in the page */
- rec_size = index->rec_cache.rec_size;
-
- /* 2. Try to find suitable space from page memory management */
- free_rec = page_header_get_ptr(page, PAGE_FREE);
- if (free_rec) {
- /* Try to allocate from the head of the free list. */
- ulint foffsets_[REC_OFFS_NORMAL_SIZE];
- ulint* foffsets = foffsets_;
- mem_heap_t* heap = NULL;
-
- rec_offs_init(foffsets_);
-
- foffsets = rec_get_offsets(
- free_rec, index, foffsets, ULINT_UNDEFINED, &heap);
- if (rec_offs_size(foffsets) < rec_size) {
- if (heap != NULL) {
- mem_heap_free(heap);
- heap = NULL;
- }
-
- free_rec = NULL;
- insert_buf = page_mem_alloc_heap(
- page, NULL, rec_size, &heap_no);
-
- if (insert_buf == NULL) {
- return(NULL);
- }
- } else {
- insert_buf = free_rec - rec_offs_extra_size(foffsets);
-
- if (page_is_comp(page)) {
- heap_no = rec_get_heap_no_new(free_rec);
- page_mem_alloc_free(
- page, NULL,
- rec_get_next_ptr(free_rec, TRUE),
- rec_size);
- } else {
- heap_no = rec_get_heap_no_old(free_rec);
- page_mem_alloc_free(
- page, NULL,
- rec_get_next_ptr(free_rec, FALSE),
- rec_size);
- }
-
- if (heap != NULL) {
- mem_heap_free(heap);
- heap = NULL;
- }
- }
- } else {
- free_rec = NULL;
- insert_buf = page_mem_alloc_heap(page, NULL,
- rec_size, &heap_no);
-
- if (insert_buf == NULL) {
- return(NULL);
- }
- }
-
- /* 3. Create the record */
- insert_rec = rec_convert_dtuple_to_rec(insert_buf, index, tuple, n_ext);
-
- /* 4. Insert the record in the linked list of records */
- ut_ad(current_rec != insert_rec);
-
- {
- /* next record after current before the insertion */
- rec_t* next_rec = page_rec_get_next(current_rec);
-#ifdef UNIV_DEBUG
- if (page_is_comp(page)) {
- ut_ad(rec_get_status(current_rec)
- <= REC_STATUS_INFIMUM);
- ut_ad(rec_get_status(insert_rec) < REC_STATUS_INFIMUM);
- ut_ad(rec_get_status(next_rec) != REC_STATUS_INFIMUM);
- }
-#endif
- page_rec_set_next(insert_rec, next_rec);
- page_rec_set_next(current_rec, insert_rec);
- }
-
- page_header_set_field(page, NULL, PAGE_N_RECS,
- 1 + page_get_n_recs(page));
-
- /* 5. Set the n_owned field in the inserted record to zero,
- and set the heap_no field */
- if (page_is_comp(page)) {
- rec_set_n_owned_new(insert_rec, NULL, 0);
- rec_set_heap_no_new(insert_rec, heap_no);
- } else {
- rec_set_n_owned_old(insert_rec, 0);
- rec_set_heap_no_old(insert_rec, heap_no);
- }
-
- /* 6. Update the last insertion info in page header */
-
- last_insert = page_header_get_ptr(page, PAGE_LAST_INSERT);
- ut_ad(!last_insert || !page_is_comp(page)
- || rec_get_node_ptr_flag(last_insert)
- == rec_get_node_ptr_flag(insert_rec));
-
- if (last_insert == NULL) {
- page_header_set_field(page, NULL, PAGE_DIRECTION,
- PAGE_NO_DIRECTION);
- page_header_set_field(page, NULL, PAGE_N_DIRECTION, 0);
-
- } else if ((last_insert == current_rec)
- && (page_header_get_field(page, PAGE_DIRECTION)
- != PAGE_LEFT)) {
-
- page_header_set_field(page, NULL, PAGE_DIRECTION,
- PAGE_RIGHT);
- page_header_set_field(page, NULL, PAGE_N_DIRECTION,
- page_header_get_field(
- page, PAGE_N_DIRECTION) + 1);
-
- } else if ((page_rec_get_next(insert_rec) == last_insert)
- && (page_header_get_field(page, PAGE_DIRECTION)
- != PAGE_RIGHT)) {
-
- page_header_set_field(page, NULL, PAGE_DIRECTION,
- PAGE_LEFT);
- page_header_set_field(page, NULL, PAGE_N_DIRECTION,
- page_header_get_field(
- page, PAGE_N_DIRECTION) + 1);
- } else {
- page_header_set_field(page, NULL, PAGE_DIRECTION,
- PAGE_NO_DIRECTION);
- page_header_set_field(page, NULL, PAGE_N_DIRECTION, 0);
- }
-
- page_header_set_ptr(page, NULL, PAGE_LAST_INSERT, insert_rec);
-
- /* 7. It remains to update the owner record. */
- {
- rec_t* owner_rec = page_rec_find_owner_rec(insert_rec);
- ulint n_owned;
- if (page_is_comp(page)) {
- n_owned = rec_get_n_owned_new(owner_rec);
- rec_set_n_owned_new(owner_rec, NULL, n_owned + 1);
- } else {
- n_owned = rec_get_n_owned_old(owner_rec);
- rec_set_n_owned_old(owner_rec, n_owned + 1);
- }
-
- /* 8. Now we have incremented the n_owned field of the owner
- record. If the number exceeds PAGE_DIR_SLOT_MAX_N_OWNED,
- we have to split the corresponding directory slot in two. */
-
- if (n_owned == PAGE_DIR_SLOT_MAX_N_OWNED) {
- page_dir_split_slot(
- page, NULL,
- page_dir_find_owner_slot(owner_rec));
- }
- }
-
- /* 8. Open the mtr for name sake to set the modification flag
- to true failing which no flush would be done. */
- byte* log_ptr = mlog_open(mtr, 0);
- ut_ad(log_ptr == NULL);
- if (log_ptr != NULL) {
- /* To keep compiler happy. */
- mlog_close(mtr, log_ptr);
- }
-
- return(insert_rec);
-}
-
/***********************************************************//**
Inserts a record next to page cursor on a compressed and uncompressed
page. Returns pointer to inserted record if succeed, i.e.,
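
The long hunk above deletes page_cur_direct_insert_rec_low(), whose numbered steps 1-8 duplicated the regular insert path that follows. Step 6, the last-insert direction bookkeeping, is worth spelling out because the remaining insert code still maintains it. Here is a standalone sketch with hypothetical names, reducing the page-header fields it touches (PAGE_LAST_INSERT, PAGE_DIRECTION, PAGE_N_DIRECTION) to a plain struct; next_rec stands for the record that follows insert_rec after linking, as page_rec_get_next(insert_rec) did in the deleted code.

#include <cstddef>
#include <cstdint>

enum page_direction_sketch { DIR_LEFT, DIR_RIGHT, DIR_NONE };

struct page_header_sketch {
	const void*		last_insert;	/* PAGE_LAST_INSERT */
	page_direction_sketch	direction;	/* PAGE_DIRECTION */
	uint32_t		n_direction;	/* PAGE_N_DIRECTION */
};

static void
update_direction(
	page_header_sketch&	hdr,
	const void*		current_rec,	/* record inserted after */
	const void*		insert_rec,	/* the new record */
	const void*		next_rec)	/* record now following insert_rec */
{
	if (hdr.last_insert == NULL) {
		hdr.direction = DIR_NONE;
		hdr.n_direction = 0;
	} else if (hdr.last_insert == current_rec
		   && hdr.direction != DIR_LEFT) {
		/* Inserting just after the previous insert point. */
		hdr.direction = DIR_RIGHT;
		hdr.n_direction++;
	} else if (next_rec == hdr.last_insert
		   && hdr.direction != DIR_RIGHT) {
		/* Inserting just before the previous insert point. */
		hdr.direction = DIR_LEFT;
		hdr.n_direction++;
	} else {
		hdr.direction = DIR_NONE;
		hdr.n_direction = 0;
	}

	hdr.last_insert = insert_rec;
}

A sequential load keeps hitting the first branch with a growing n_direction, which is the pattern the removed copy of this logic had to keep in sync as well.
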