summaryrefslogtreecommitdiff
path: root/storage/innobase/ha
diff options
context:
space:
mode:
authorMarko Mäkelä <marko.makela@mariadb.com>2020-06-18 11:37:24 +0300
committerMarko Mäkelä <marko.makela@mariadb.com>2020-06-18 14:16:01 +0300
commit9159b8976f7dfe9c956608f23df42d49ba1fcbbc (patch)
tree3bae634312d8e9c7117534d7305f233fa6e6329c /storage/innobase/ha
parent08f6513cb2369a3d5a053a43bb945aa93a3b1da7 (diff)
downloadmariadb-git-9159b8976f7dfe9c956608f23df42d49ba1fcbbc.tar.gz
MDEV-22871: Clean up hash_table_t
HASH_TABLE_SYNC_MUTEX was kind-of used for the adaptive hash index, even though that hash table is already protected by btr_search_latches[]. HASH_TABLE_SYNC_RWLOCK was only being used for buf_pool.page_hash. It is cleaner to decouple that synchronization from hash_table_t, and move it to the actual user. buf_pool_t::page_hash_latches[]: Synchronization for buf_pool.page_hash. LATCH_ID_HASH_TABLE_MUTEX: Remove. hash_table_t::sync_obj, hash_table_t::n_sync_obj: Remove. hash_table_t::type, hash_table_sync_t: Remove. HASH_ASSERT_OWN(), hash_get_mutex(), hash_get_nth_mutex(): Remove. ib_recreate(): Merge to the only caller, buf_pool_resize_hash(). ib_create(): Merge to the callers. ha_clear(): Merge to the only caller buf_pool_t::close(). buf_pool_t::create(): Merge the ib_create() and hash_create_sync_obj() invocations. ha_insert_for_fold_func(): Clarify an assertion. buf_pool_t::page_hash_lock(): Simplify the logic. hash_assert_can_search(), hash_assert_can_modify(): Remove. These predicates were only being invoked for the adaptive hash index, while they only are effective for buf_pool.page_hash. HASH_DELETE_AND_COMPACT(): Merge to ha_delete_hash_node(). hash_get_sync_obj_index(): Remove. hash_table_t::heaps[], hash_get_nth_heap(): Remove. It was actually unused! hash_get_heap(): Remove. It was only used in ha_delete_hash_node(), where we always use hash_table_t::heap. hash_table_t::calc_hash(): Replaces hash_calc_hash().
Diffstat (limited to 'storage/innobase/ha')
-rw-r--r--storage/innobase/ha/ha0ha.cc263
-rw-r--r--storage/innobase/ha/hash0hash.cc109
2 files changed, 64 insertions, 308 deletions
diff --git a/storage/innobase/ha/ha0ha.cc b/storage/innobase/ha/ha0ha.cc
index f73de63a97a..f0b5fb672cd 100644
--- a/storage/innobase/ha/ha0ha.cc
+++ b/storage/innobase/ha/ha0ha.cc
@@ -32,166 +32,6 @@ Created 8/22/1994 Heikki Tuuri
#include "btr0sea.h"
#include "page0page.h"
-/*************************************************************//**
-Creates a hash table with at least n array cells. The actual number
-of cells is chosen to be a prime number slightly bigger than n.
-@return own: created table */
-hash_table_t*
-ib_create(
-/*======*/
- ulint n, /*!< in: number of array cells */
- latch_id_t id, /*!< in: latch ID */
- ulint n_sync_obj,
- /*!< in: number of mutexes to protect the
- hash table: must be a power of 2, or 0 */
- ulint type) /*!< in: type of datastructure for which
- MEM_HEAP_FOR_PAGE_HASH */
-{
- hash_table_t* table;
-
- ut_a(type == MEM_HEAP_FOR_BTR_SEARCH
- || type == MEM_HEAP_FOR_PAGE_HASH);
-
- ut_ad(ut_is_2pow(n_sync_obj));
- table = hash_create(n);
-
- /* Creating MEM_HEAP_BTR_SEARCH type heaps can potentially fail,
-	but in practise it never should in this case, hence the asserts. */
-
- if (n_sync_obj == 0) {
- table->heap = mem_heap_create_typed(
- std::min<ulong>(
- 4096,
- MEM_MAX_ALLOC_IN_BUF / 2
- - MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)),
- type);
- ut_a(table->heap);
-
- return(table);
- }
-
- if (type == MEM_HEAP_FOR_PAGE_HASH) {
- /* We create a hash table protected by rw_locks for
- buf_pool.page_hash. */
- hash_create_sync_obj(
- table, HASH_TABLE_SYNC_RW_LOCK, id, n_sync_obj);
- } else {
- hash_create_sync_obj(
- table, HASH_TABLE_SYNC_MUTEX, id, n_sync_obj);
- }
-
- table->heaps = static_cast<mem_heap_t**>(
- ut_malloc_nokey(n_sync_obj * sizeof(void*)));
-
- for (ulint i = 0; i < n_sync_obj; i++) {
- table->heaps[i] = mem_heap_create_typed(
- std::min<ulong>(
- 4096,
- MEM_MAX_ALLOC_IN_BUF / 2
- - MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)),
- type);
- ut_a(table->heaps[i]);
- }
-
- return(table);
-}
-
-/** Recreate a hash table with at least n array cells. The actual number
-of cells is chosen to be a prime number slightly bigger than n.
-The new cells are all cleared. The heaps are recreated.
-The sync objects are reused.
-@param[in,out]	table	hash table to be resized (to be freed later)
-@param[in] n number of array cells
-@return resized new table */
-hash_table_t*
-ib_recreate(
- hash_table_t* table,
- ulint n)
-{
- /* This function is for only page_hash for now */
- ut_ad(table->type == HASH_TABLE_SYNC_RW_LOCK);
- ut_ad(table->n_sync_obj > 0);
-
- hash_table_t* new_table = hash_create(n);
-
- new_table->type = table->type;
- new_table->n_sync_obj = table->n_sync_obj;
- new_table->sync_obj = table->sync_obj;
-
- for (ulint i = 0; i < table->n_sync_obj; i++) {
- mem_heap_free(table->heaps[i]);
- }
- ut_free(table->heaps);
-
- new_table->heaps = static_cast<mem_heap_t**>(
- ut_malloc_nokey(new_table->n_sync_obj * sizeof(void*)));
-
- for (ulint i = 0; i < new_table->n_sync_obj; i++) {
- new_table->heaps[i] = mem_heap_create_typed(
- std::min<ulong>(
- 4096,
- MEM_MAX_ALLOC_IN_BUF / 2
- - MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)),
- MEM_HEAP_FOR_PAGE_HASH);
- ut_a(new_table->heaps[i]);
- }
-
- return(new_table);
-}
-
-/*************************************************************//**
-Empties a hash table and frees the memory heaps. */
-void
-ha_clear(
-/*=====*/
- hash_table_t* table) /*!< in, own: hash table */
-{
- ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-#ifdef BTR_CUR_HASH_ADAPT
- ut_ad(!table->adaptive || btr_search_own_all(RW_LOCK_X));
-#endif /* BTR_CUR_HASH_ADAPT */
-
- for (ulint i = 0; i < table->n_sync_obj; i++) {
- mem_heap_free(table->heaps[i]);
- }
-
- ut_free(table->heaps);
-
- switch (table->type) {
- case HASH_TABLE_SYNC_MUTEX:
- for (ulint i = 0; i < table->n_sync_obj; ++i) {
- mutex_destroy(&table->sync_obj.mutexes[i]);
- }
- ut_free(table->sync_obj.mutexes);
- table->sync_obj.mutexes = NULL;
- break;
-
- case HASH_TABLE_SYNC_RW_LOCK:
- for (ulint i = 0; i < table->n_sync_obj; ++i) {
- rw_lock_free(&table->sync_obj.rw_locks[i]);
- }
-
- ut_free(table->sync_obj.rw_locks);
- table->sync_obj.rw_locks = NULL;
- break;
-
- case HASH_TABLE_SYNC_NONE:
- /* do nothing */
- break;
- }
-
- table->n_sync_obj = 0;
- table->type = HASH_TABLE_SYNC_NONE;
-
-
- /* Clear the hash table. */
- ulint n = hash_get_n_cells(table);
-
- for (ulint i = 0; i < n; i++) {
- hash_get_nth_cell(table, i)->node = NULL;
- }
-}
-
#ifdef BTR_CUR_HASH_ADAPT
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Maximum number of records in a page */
@@ -199,42 +39,6 @@ static const ulint MAX_N_POINTERS
= UNIV_PAGE_SIZE_MAX / REC_N_NEW_EXTRA_BYTES;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-# ifdef UNIV_DEBUG
-/** Assert that the synchronization object in a hash operation involving
-possible change in the hash table is held in exclusive mode */
-void hash_assert_can_modify(hash_table_t *table, ulint fold)
-{
- switch (table->type) {
- case HASH_TABLE_SYNC_MUTEX:
- ut_ad(mutex_own(hash_get_mutex(table, fold)));
- return;
- case HASH_TABLE_SYNC_RW_LOCK:
- ut_ad(buf_pool.page_hash_lock_own_flagged(fold, RW_LOCK_FLAG_X));
- return;
- case HASH_TABLE_SYNC_NONE:
- return;
- }
- ut_ad(0);
-}
-
-/** Assert that the synchronization object in a hash operation involving
-possible change in the hash table is held in shared or exclusive mode */
-void hash_assert_can_search(hash_table_t *table, ulint fold)
-{
- switch (table->type) {
- case HASH_TABLE_SYNC_MUTEX:
- ut_ad(mutex_own(hash_get_mutex(table, fold)));
- return;
- case HASH_TABLE_SYNC_RW_LOCK:
- ut_ad(buf_pool.page_hash_lock_own_flagged(fold, RW_LOCK_FLAG_X |
- RW_LOCK_FLAG_S));
- return;
- case HASH_TABLE_SYNC_NONE:
- return;
- }
-}
-# endif
-
/*************************************************************//**
Inserts an entry into a hash table. If an entry with the same fold number
is found, its node is updated to point to the new data, and no new node
@@ -262,10 +66,10 @@ ha_insert_for_fold_func(
ut_ad(data);
ut_ad(table);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
+ ut_ad(table->heap->type & MEM_HEAP_BTR_SEARCH);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(block->frame == page_align(data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- hash_assert_can_modify(table, fold);
ut_ad(btr_search_enabled);
hash = hash_calc_hash(fold, table);
@@ -296,16 +100,12 @@ ha_insert_for_fold_func(
}
/* We have to allocate a new chain node */
-
node = static_cast<ha_node_t*>(
- mem_heap_alloc(hash_get_heap(table, fold), sizeof(ha_node_t)));
+ mem_heap_alloc(table->heap, sizeof(ha_node_t)));
if (node == NULL) {
/* It was a btr search type memory heap and at the moment
no more memory could be allocated: return */
-
- ut_ad(hash_get_heap(table, fold)->type & MEM_HEAP_BTR_SEARCH);
-
return(FALSE);
}
@@ -342,10 +142,8 @@ ha_insert_for_fold_func(
#ifdef UNIV_DEBUG
/** Verify if latch corresponding to the hash table is x-latched
-@param[in] table hash table */
-static
-void
-ha_btr_search_latch_x_locked(const hash_table_t* table)
+@param table hash table */
+void ha_btr_search_latch_x_locked(const hash_table_t* table)
{
ulint i;
for (i = 0; i < btr_ahi_parts; ++i) {
@@ -372,13 +170,53 @@ ha_delete_hash_node(
ut_d(ha_btr_search_latch_x_locked(table));
ut_ad(btr_search_enabled);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
- if (table->adaptive) {
- ut_a(del_node->block->frame == page_align(del_node->data));
- ut_a(del_node->block->n_pointers-- < MAX_N_POINTERS);
- }
+ ut_a(table->adaptive);
+ ut_a(del_node->block->frame == page_align(del_node->data));
+ ut_a(del_node->block->n_pointers-- < MAX_N_POINTERS);
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- HASH_DELETE_AND_COMPACT(ha_node_t, next, table, del_node);
+ ha_node_t* node;
+
+ const ulint fold = del_node->fold;
+
+ HASH_DELETE(ha_node_t, next, table, fold, del_node);
+
+ ha_node_t* top_node = (ha_node_t*) mem_heap_get_top(table->heap,
+ sizeof(ha_node_t));
+
+ /* If the node to remove is not the top node in the heap, compact the
+ heap of nodes by moving the top node in the place of del_node. */
+
+ if (del_node != top_node) {
+ /* Copy the top node in place of del_node */
+
+ *del_node = *top_node;
+
+ hash_cell_t* cell = hash_get_nth_cell(
+ table, hash_calc_hash(top_node->fold, table));
+
+ /* Look for the pointer to the top node, to update it */
+
+ if (cell->node == top_node) {
+ /* The top node is the first in the chain */
+ cell->node = del_node;
+ } else {
+ /* We have to look for the predecessor */
+ node = static_cast<ha_node_t*>(cell->node);
+
+ while (top_node != HASH_GET_NEXT(next, node)) {
+ node = static_cast<ha_node_t*>(
+ HASH_GET_NEXT(next, node));
+ }
+
+ /* Now we have the predecessor node */
+ node->next = del_node;
+ }
+ }
+
+ /* Free the space occupied by the top node */
+
+ mem_heap_free_top(table->heap, sizeof(ha_node_t));
}
/*********************************************************//**
@@ -400,7 +238,6 @@ ha_search_and_update_if_found_func(
ut_ad(table);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
- hash_assert_can_modify(table, fold);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(new_block->frame == page_align(new_data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
@@ -444,8 +281,8 @@ ha_remove_all_nodes_to_page(
ut_ad(table);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
- hash_assert_can_modify(table, fold);
ut_ad(btr_search_enabled);
+ ut_d(ha_btr_search_latch_x_locked(table));
node = ha_chain_get_first(table, fold);
diff --git a/storage/innobase/ha/hash0hash.cc b/storage/innobase/ha/hash0hash.cc
index 17e443a8bc8..6a8c84a349d 100644
--- a/storage/innobase/ha/hash0hash.cc
+++ b/storage/innobase/ha/hash0hash.cc
@@ -28,47 +28,21 @@ Created 5/20/1997 Heikki Tuuri
#include "mem0mem.h"
#include "sync0sync.h"
-/*************************************************************//**
-Creates a hash table with >= n array cells. The actual number of cells is
-chosen to be a prime number slightly bigger than n.
-@return own: created table */
-hash_table_t*
-hash_create(
-/*========*/
- ulint n) /*!< in: number of array cells */
+/**
+Create a hash table.
+@param n the minimum number of hash array elements
+@return created table (with n_cells being a prime, at least n) */
+hash_table_t *hash_create(ulint n)
{
- hash_cell_t* array;
- ulint prime;
- hash_table_t* table;
-
- prime = ut_find_prime(n);
-
- table = static_cast<hash_table_t*>(
- ut_malloc_nokey(sizeof(hash_table_t)));
-
- array = static_cast<hash_cell_t*>(
- ut_malloc_nokey(sizeof(hash_cell_t) * prime));
-
- /* The default type of hash_table is HASH_TABLE_SYNC_NONE i.e.:
- the caller is responsible for access control to the table. */
- table->type = HASH_TABLE_SYNC_NONE;
- table->array = array;
- table->n_cells = prime;
-#ifdef BTR_CUR_HASH_ADAPT
-# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
- table->adaptive = FALSE;
-# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-#endif /* BTR_CUR_HASH_ADAPT */
- table->n_sync_obj = 0;
- table->sync_obj.mutexes = NULL;
- table->heaps = NULL;
- table->heap = NULL;
- ut_d(table->magic_n = HASH_TABLE_MAGIC_N);
-
- /* Initialize the cell array */
- hash_table_clear(table);
-
- return(table);
+ ulint prime= ut_find_prime(n);
+
+ hash_table_t *table= static_cast<hash_table_t*>
+ (ut_zalloc_nokey(sizeof *table));
+ table->array= static_cast<hash_cell_t*>(ut_zalloc_nokey(sizeof(hash_cell_t) *
+ prime));
+ table->n_cells= prime;
+ ut_d(table->magic_n= HASH_TABLE_MAGIC_N);
+ return table;
}
/*************************************************************//**
@@ -83,58 +57,3 @@ hash_table_free(
ut_free(table->array);
ut_free(table);
}
-
-/*************************************************************//**
-Creates a sync object array to protect a hash table.
-::sync_obj can be mutexes or rw_locks depending on the type of
-hash table. */
-void
-hash_create_sync_obj(
-/*=================*/
- hash_table_t* table, /*!< in: hash table */
- enum hash_table_sync_t type, /*!< in: HASH_TABLE_SYNC_MUTEX
- or HASH_TABLE_SYNC_RW_LOCK */
- latch_id_t id, /*!< in: latch ID */
- ulint n_sync_obj)/*!< in: number of sync objects,
- must be a power of 2 */
-{
- ut_a(n_sync_obj > 0);
- ut_a(ut_is_2pow(n_sync_obj));
- ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-
- table->type = type;
-
- switch (table->type) {
- case HASH_TABLE_SYNC_MUTEX:
- table->sync_obj.mutexes = static_cast<ib_mutex_t*>(
- ut_malloc_nokey(n_sync_obj * sizeof(ib_mutex_t)));
-
- for (ulint i = 0; i < n_sync_obj; i++) {
- mutex_create(id, table->sync_obj.mutexes + i);
- }
-
- break;
-
- case HASH_TABLE_SYNC_RW_LOCK: {
-
- latch_level_t level = sync_latch_get_level(id);
-
- ut_a(level != SYNC_UNKNOWN);
-
- table->sync_obj.rw_locks = static_cast<rw_lock_t*>(
- ut_malloc_nokey(n_sync_obj * sizeof(rw_lock_t)));
-
- for (ulint i = 0; i < n_sync_obj; i++) {
- rw_lock_create(hash_table_locks_key,
- table->sync_obj.rw_locks + i, level);
- }
-
- break;
- }
-
- case HASH_TABLE_SYNC_NONE:
- ut_error;
- }
-
- table->n_sync_obj = n_sync_obj;
-}