summaryrefslogtreecommitdiff
path: root/storage/innobase/ha/ha0ha.cc
diff options
context:
space:
mode:
Diffstat (limited to 'storage/innobase/ha/ha0ha.cc')
-rw-r--r--storage/innobase/ha/ha0ha.cc276
1 files changed, 135 insertions, 141 deletions
diff --git a/storage/innobase/ha/ha0ha.cc b/storage/innobase/ha/ha0ha.cc
index 499412ade12..cf9a454ad8d 100644
--- a/storage/innobase/ha/ha0ha.cc
+++ b/storage/innobase/ha/ha0ha.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -24,41 +25,29 @@ Created 8/22/1994 Heikki Tuuri
*************************************************************************/
#include "ha0ha.h"
-#ifdef UNIV_NONINL
-#include "ha0ha.ic"
-#endif
-#ifndef UNIV_HOTBACKUP
#ifdef UNIV_DEBUG
# include "buf0buf.h"
#endif /* UNIV_DEBUG */
-# include "btr0sea.h"
+#include "btr0sea.h"
#include "page0page.h"
/*************************************************************//**
Creates a hash table with at least n array cells. The actual number
of cells is chosen to be a prime number slightly bigger than n.
-@return own: created table */
-UNIV_INTERN
+@return own: created table */
hash_table_t*
-ha_create_func(
-/*===========*/
- ulint n, /*!< in: number of array cells */
-#ifdef UNIV_SYNC_DEBUG
- ulint sync_level, /*!< in: level of the mutexes or rw_locks
- in the latching order: this is used in the
- debug version */
-#endif /* UNIV_SYNC_DEBUG */
- ulint n_sync_obj, /*!< in: number of mutexes or rw_locks
- to protect the hash table: must be a
- power of 2, or 0 */
- ulint type) /*!< in: type of datastructure for which
- the memory heap is going to be used e.g.:
- MEM_HEAP_FOR_BTR_SEARCH or
+ib_create(
+/*======*/
+ ulint n, /*!< in: number of array cells */
+ latch_id_t id, /*!< in: latch ID */
+ ulint n_sync_obj,
+ /*!< in: number of mutexes to protect the
+ hash table: must be a power of 2, or 0 */
+ ulint type) /*!< in: type of datastructure for which
MEM_HEAP_FOR_PAGE_HASH */
{
hash_table_t* table;
- ulint i;
ut_a(type == MEM_HEAP_FOR_BTR_SEARCH
|| type == MEM_HEAP_FOR_PAGE_HASH);
@@ -71,7 +60,10 @@ ha_create_func(
if (n_sync_obj == 0) {
table->heap = mem_heap_create_typed(
- ut_min(4096, MEM_MAX_ALLOC_IN_BUF), type);
+ ut_min(static_cast<ulint>(4096),
+ MEM_MAX_ALLOC_IN_BUF / 2
+ - MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)),
+ type);
ut_a(table->heap);
return(table);
@@ -80,61 +72,103 @@ ha_create_func(
if (type == MEM_HEAP_FOR_PAGE_HASH) {
/* We create a hash table protected by rw_locks for
buf_pool->page_hash. */
- hash_create_sync_obj(table, HASH_TABLE_SYNC_RW_LOCK,
- n_sync_obj, sync_level);
+ hash_create_sync_obj(
+ table, HASH_TABLE_SYNC_RW_LOCK, id, n_sync_obj);
} else {
- hash_create_sync_obj(table, HASH_TABLE_SYNC_MUTEX,
- n_sync_obj, sync_level);
+ hash_create_sync_obj(
+ table, HASH_TABLE_SYNC_MUTEX, id, n_sync_obj);
}
table->heaps = static_cast<mem_heap_t**>(
- mem_alloc(n_sync_obj * sizeof(void*)));
-
- for (i = 0; i < n_sync_obj; i++) {
- table->heaps[i] = mem_heap_create_typed(4096, type);
+ ut_malloc_nokey(n_sync_obj * sizeof(void*)));
+
+ for (ulint i = 0; i < n_sync_obj; i++) {
+ table->heaps[i] = mem_heap_create_typed(
+ ut_min(static_cast<ulint>(4096),
+ MEM_MAX_ALLOC_IN_BUF / 2
+ - MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)),
+ type);
ut_a(table->heaps[i]);
}
return(table);
}
+/** Recreate a hash table with at least n array cells. The actual number
+of cells is chosen to be a prime number slightly bigger than n.
+The new cells are all cleared. The heaps are recreated.
+The sync objects are reused.
+@param[in,out]	table	hash table to be resized (to be freed later)
+@param[in] n number of array cells
+@return resized new table */
+hash_table_t*
+ib_recreate(
+ hash_table_t* table,
+ ulint n)
+{
+	/* For now, this function is used only for buf_pool->page_hash */
+ ut_ad(table->type == HASH_TABLE_SYNC_RW_LOCK);
+ ut_ad(table->n_sync_obj > 0);
+
+ hash_table_t* new_table = hash_create(n);
+
+ new_table->type = table->type;
+ new_table->n_sync_obj = table->n_sync_obj;
+ new_table->sync_obj = table->sync_obj;
+
+ for (ulint i = 0; i < table->n_sync_obj; i++) {
+ mem_heap_free(table->heaps[i]);
+ }
+ ut_free(table->heaps);
+
+ new_table->heaps = static_cast<mem_heap_t**>(
+ ut_malloc_nokey(new_table->n_sync_obj * sizeof(void*)));
+
+ for (ulint i = 0; i < new_table->n_sync_obj; i++) {
+ new_table->heaps[i] = mem_heap_create_typed(
+ ut_min(static_cast<ulint>(4096),
+ MEM_MAX_ALLOC_IN_BUF / 2
+ - MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)),
+ MEM_HEAP_FOR_PAGE_HASH);
+ ut_a(new_table->heaps[i]);
+ }
+
+ return(new_table);
+}
+
/*************************************************************//**
Empties a hash table and frees the memory heaps. */
-UNIV_INTERN
void
ha_clear(
/*=====*/
hash_table_t* table) /*!< in, own: hash table */
{
- ulint i;
- ulint n;
-
- ut_ad(table);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-#ifdef UNIV_SYNC_DEBUG
- ut_ad(!table->adaptive
- || rw_lock_own(&btr_search_latch, RW_LOCK_EXCLUSIVE));
-#endif /* UNIV_SYNC_DEBUG */
-
- /* Free the memory heaps. */
- n = table->n_sync_obj;
+#ifdef BTR_CUR_HASH_ADAPT
+ ut_ad(!table->adaptive || btr_search_own_all(RW_LOCK_X));
+#endif /* BTR_CUR_HASH_ADAPT */
- for (i = 0; i < n; i++) {
+ for (ulint i = 0; i < table->n_sync_obj; i++) {
mem_heap_free(table->heaps[i]);
}
- if (table->heaps) {
- mem_free(table->heaps);
- }
+ ut_free(table->heaps);
switch (table->type) {
case HASH_TABLE_SYNC_MUTEX:
- mem_free(table->sync_obj.mutexes);
+ for (ulint i = 0; i < table->n_sync_obj; ++i) {
+ mutex_destroy(&table->sync_obj.mutexes[i]);
+ }
+ ut_free(table->sync_obj.mutexes);
table->sync_obj.mutexes = NULL;
break;
case HASH_TABLE_SYNC_RW_LOCK:
- mem_free(table->sync_obj.rw_locks);
+ for (ulint i = 0; i < table->n_sync_obj; ++i) {
+ rw_lock_free(&table->sync_obj.rw_locks[i]);
+ }
+
+ ut_free(table->sync_obj.rw_locks);
table->sync_obj.rw_locks = NULL;
break;
@@ -148,20 +182,26 @@ ha_clear(
/* Clear the hash table. */
- n = hash_get_n_cells(table);
+ ulint n = hash_get_n_cells(table);
- for (i = 0; i < n; i++) {
+ for (ulint i = 0; i < n; i++) {
hash_get_nth_cell(table, i)->node = NULL;
}
}
+#ifdef BTR_CUR_HASH_ADAPT
+# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+/** Maximum number of records in a page */
+static const lint MAX_N_POINTERS
+ = UNIV_PAGE_SIZE_MAX / REC_N_NEW_EXTRA_BYTES;
+# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+
/*************************************************************//**
Inserts an entry into a hash table. If an entry with the same fold number
is found, its node is updated to point to the new data, and no new node
is inserted. If btr_search_enabled is set to FALSE, we will only allow
updating existing nodes, but no new node is allowed to be added.
-@return TRUE if succeed, FALSE if no more memory could be allocated */
-UNIV_INTERN
+@return TRUE if succeed, FALSE if no more memory could be allocated */
ibool
ha_insert_for_fold_func(
/*====================*/
@@ -202,9 +242,11 @@ ha_insert_for_fold_func(
buf_block_t* prev_block = prev_node->block;
ut_a(prev_block->frame
== page_align(prev_node->data));
- ut_a(prev_block->n_pointers > 0);
- prev_block->n_pointers--;
- block->n_pointers++;
+ ut_a(my_atomic_addlint(
+ &prev_block->n_pointers, -1)
+ < MAX_N_POINTERS);
+ ut_a(my_atomic_addlint(&block->n_pointers, 1)
+ < MAX_N_POINTERS);
}
prev_node->block = block;
@@ -235,7 +277,8 @@ ha_insert_for_fold_func(
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (table->adaptive) {
- block->n_pointers++;
+ ut_a(my_atomic_addlint(&block->n_pointers, 1)
+ < MAX_N_POINTERS);
}
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
@@ -262,9 +305,27 @@ ha_insert_for_fold_func(
return(TRUE);
}
+#ifdef UNIV_DEBUG
+/** Verify if latch corresponding to the hash table is x-latched
+@param[in] table hash table */
+static
+void
+ha_btr_search_latch_x_locked(const hash_table_t* table)
+{
+ ulint i;
+ for (i = 0; i < btr_ahi_parts; ++i) {
+ if (btr_search_sys->hash_tables[i] == table) {
+ break;
+ }
+ }
+
+ ut_ad(i < btr_ahi_parts);
+ ut_ad(rw_lock_own(btr_search_latches[i], RW_LOCK_X));
+}
+#endif /* UNIV_DEBUG */
+
/***********************************************************//**
Deletes a hash node. */
-UNIV_INTERN
void
ha_delete_hash_node(
/*================*/
@@ -273,15 +334,13 @@ ha_delete_hash_node(
{
ut_ad(table);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-#ifdef UNIV_SYNC_DEBUG
- ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
-#endif /* UNIV_SYNC_DEBUG */
+ ut_d(ha_btr_search_latch_x_locked(table));
ut_ad(btr_search_enabled);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (table->adaptive) {
ut_a(del_node->block->frame = page_align(del_node->data));
- ut_a(del_node->block->n_pointers > 0);
- del_node->block->n_pointers--;
+ ut_a(my_atomic_addlint(&del_node->block->n_pointers, -1)
+ < MAX_N_POINTERS);
}
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
@@ -292,7 +351,6 @@ ha_delete_hash_node(
Looks for an element when we know the pointer to the data, and updates
the pointer to data, if found.
@return TRUE if found */
-UNIV_INTERN
ibool
ha_search_and_update_if_found_func(
/*===============================*/
@@ -312,9 +370,8 @@ ha_search_and_update_if_found_func(
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(new_block->frame == page_align(new_data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-#ifdef UNIV_SYNC_DEBUG
- ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
-#endif /* UNIV_SYNC_DEBUG */
+
+ ut_d(ha_btr_search_latch_x_locked(table));
if (!btr_search_enabled) {
return(FALSE);
@@ -325,9 +382,10 @@ ha_search_and_update_if_found_func(
if (node) {
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (table->adaptive) {
- ut_a(node->block->n_pointers > 0);
- node->block->n_pointers--;
- new_block->n_pointers++;
+ ut_a(my_atomic_addlint(&node->block->n_pointers, -1)
+ < MAX_N_POINTERS);
+ ut_a(my_atomic_addlint(&new_block->n_pointers, 1)
+ < MAX_N_POINTERS);
}
node->block = new_block;
@@ -343,7 +401,6 @@ ha_search_and_update_if_found_func(
/*****************************************************************//**
Removes from the chain determined by fold all nodes whose data pointer
points to the page given. */
-UNIV_INTERN
void
ha_remove_all_nodes_to_page(
/*========================*/
@@ -386,14 +443,13 @@ ha_remove_all_nodes_to_page(
node = ha_chain_get_next(node);
}
-#endif
+#endif /* UNIV_DEBUG */
}
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/*************************************************************//**
Validates a given range of the cells in hash table.
-@return TRUE if ok */
-UNIV_INTERN
+@return TRUE if ok */
ibool
ha_validate(
/*========*/
@@ -421,12 +477,9 @@ ha_validate(
node = node->next) {
if (hash_calc_hash(node->fold, table) != i) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- "InnoDB: Error: hash table node"
- " fold value %lu does not\n"
- "InnoDB: match the cell number %lu.\n",
- (ulong) node->fold, (ulong) i);
+ ib::error() << "Hash table node fold value "
+ << node->fold << " does not match the"
+ " cell number " << i << ".";
ok = FALSE;
}
@@ -436,63 +489,4 @@ ha_validate(
return(ok);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
-
-/*************************************************************//**
-Prints info of a hash table. */
-UNIV_INTERN
-void
-ha_print_info(
-/*==========*/
- FILE* file, /*!< in: file where to print */
- hash_table_t* table) /*!< in: hash table */
-{
-#ifdef UNIV_DEBUG
-/* Some of the code here is disabled for performance reasons in production
-builds, see http://bugs.mysql.com/36941 */
-#define PRINT_USED_CELLS
-#endif /* UNIV_DEBUG */
-
-#ifdef PRINT_USED_CELLS
- hash_cell_t* cell;
- ulint cells = 0;
- ulint i;
-#endif /* PRINT_USED_CELLS */
- ulint n_bufs;
-
- ut_ad(table);
- ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-#ifdef PRINT_USED_CELLS
- for (i = 0; i < hash_get_n_cells(table); i++) {
-
- cell = hash_get_nth_cell(table, i);
-
- if (cell->node) {
-
- cells++;
- }
- }
-#endif /* PRINT_USED_CELLS */
-
- fprintf(file, "Hash table size %lu",
- (ulong) hash_get_n_cells(table));
-
-#ifdef PRINT_USED_CELLS
- fprintf(file, ", used cells %lu", (ulong) cells);
-#endif /* PRINT_USED_CELLS */
-
- if (table->heaps == NULL && table->heap != NULL) {
-
- /* This calculation is intended for the adaptive hash
- index: how many buffer frames we have reserved? */
-
- n_bufs = UT_LIST_GET_LEN(table->heap->base) - 1;
-
- if (table->heap->free_block) {
- n_bufs++;
- }
-
- fprintf(file, ", node heap has %lu buffer(s)\n",
- (ulong) n_bufs);
- }
-}
-#endif /* !UNIV_HOTBACKUP */
+#endif /* BTR_CUR_HASH_ADAPT */