diff options
Diffstat (limited to 'storage/innobase/btr/btr0defragment.cc')
-rw-r--r-- | storage/innobase/btr/btr0defragment.cc | 33 |
1 file changed, 15 insertions(+), 18 deletions(-)
diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc index aa142e15a50..0775dfe5e5f 100644 --- a/storage/innobase/btr/btr0defragment.cc +++ b/storage/innobase/btr/btr0defragment.cc @@ -63,14 +63,14 @@ UNIV_INTERN mysql_pfs_key_t btr_defragment_mutex_key; /* Number of compression failures caused by defragmentation since server start. */ -ulint btr_defragment_compression_failures = 0; +Atomic_counter<ulint> btr_defragment_compression_failures; /* Number of btr_defragment_n_pages calls that altered page but didn't manage to release any page. */ -ulint btr_defragment_failures = 0; +Atomic_counter<ulint> btr_defragment_failures; /* Total number of btr_defragment_n_pages calls that altered page. The difference between btr_defragment_count and btr_defragment_failures shows the amount of effort wasted. */ -ulint btr_defragment_count = 0; +Atomic_counter<ulint> btr_defragment_count; /******************************************************************//** Constructor for btr_defragment_item_t. */ @@ -167,7 +167,7 @@ btr_defragment_add_index( // Load index rood page. buf_block_t* block = btr_block_get( page_id_t(index->table->space_id, index->page), - page_size_t(index->table->space->flags), + index->table->space->zip_size(), RW_NO_LATCH, index, &mtr); page_t* page = NULL; @@ -377,7 +377,7 @@ btr_defragment_merge_pages( dict_index_t* index, /*!< in: index tree */ buf_block_t* from_block, /*!< in: origin of merge */ buf_block_t* to_block, /*!< in: destination of merge */ - const page_size_t page_size, /*!< in: page size of the block */ + ulint zip_size, /*!< in: ROW_FORMAT=COMPRESSED size */ ulint reserved_space, /*!< in: space reserved for future insert to avoid immediate page split */ ulint* max_data_size, /*!< in/out: max data size to @@ -405,7 +405,7 @@ btr_defragment_merge_pages( // Estimate how many records can be moved from the from_page to // the to_page. 
- if (page_size.is_compressed()) { + if (zip_size) { ulint page_diff = srv_page_size - *max_data_size; max_ins_size_to_use = (max_ins_size_to_use > page_diff) ? max_ins_size_to_use - page_diff : 0; @@ -449,8 +449,7 @@ btr_defragment_merge_pages( // n_recs_to_move number of records to to_page. We try to reduce // the targeted data size on the to_page by // BTR_DEFRAGMENT_PAGE_REDUCTION_STEP_SIZE and try again. - my_atomic_addlint( - &btr_defragment_compression_failures, 1); + btr_defragment_compression_failures++; max_ins_size_to_use = move_size > BTR_DEFRAGMENT_PAGE_REDUCTION_STEP_SIZE ? move_size - BTR_DEFRAGMENT_PAGE_REDUCTION_STEP_SIZE @@ -474,7 +473,7 @@ btr_defragment_merge_pages( // Set ibuf free bits if necessary. if (!dict_index_is_clust(index) && page_is_leaf(to_page)) { - if (page_size.is_compressed()) { + if (zip_size) { ibuf_reset_free_bits(to_block); } else { ibuf_update_free_bits_if_full( @@ -492,7 +491,7 @@ btr_defragment_merge_pages( btr_search_drop_page_hash_index(from_block); btr_level_list_remove( index->table->space_id, - page_size, from_page, index, mtr); + zip_size, from_page, index, mtr); btr_page_get_father(index, from_block, mtr, &parent); btr_cur_node_ptr_delete(&parent, mtr); /* btr_blob_dbg_remove(from_page, index, @@ -579,7 +578,7 @@ btr_defragment_n_pages( } first_page = buf_block_get_frame(block); - const page_size_t page_size(index->table->space->flags); + const ulint zip_size = index->table->space->zip_size(); /* 1. Load the pages and calculate the total data size. */ blocks[0] = block; @@ -595,7 +594,7 @@ btr_defragment_n_pages( } blocks[i] = btr_block_get(page_id_t(index->table->space_id, - page_no), page_size, + page_no), zip_size, RW_X_LATCH, index, mtr); } @@ -621,7 +620,7 @@ btr_defragment_n_pages( optimal_page_size = page_get_free_space_of_empty( page_is_comp(first_page)); // For compressed pages, we take compression failures into account. 
- if (page_size.is_compressed()) { + if (zip_size) { ulint size = 0; uint i = 0; // We estimate the optimal data size of the index use samples of @@ -664,7 +663,7 @@ btr_defragment_n_pages( // Start from the second page. for (uint i = 1; i < n_pages; i ++) { buf_block_t* new_block = btr_defragment_merge_pages( - index, blocks[i], current_block, page_size, + index, blocks[i], current_block, zip_size, reserved_space, &max_data_size, heap, mtr); if (new_block != current_block) { n_defragmented ++; @@ -673,11 +672,9 @@ btr_defragment_n_pages( } mem_heap_free(heap); n_defragmented ++; - my_atomic_addlint( - &btr_defragment_count, 1); + btr_defragment_count++; if (n_pages == n_defragmented) { - my_atomic_addlint( - &btr_defragment_failures, 1); + btr_defragment_failures++; } else { index->stat_defrag_n_pages_freed += (n_pages - n_defragmented); } |