author	Marko Mäkelä <marko.makela@mariadb.com>	2020-03-18 21:48:00 +0200
committer	Marko Mäkelä <marko.makela@mariadb.com>	2020-03-18 22:32:40 +0200
commit	a786f50de5ce17748d851cf4264399c277d61925 (patch)
tree	8a107091cec37ee88a58ebbffb2bcd0fa4ceb241
parent	e0eacbee77fd56255d1073d7cb3e738b7de0285c (diff)
MDEV-21962 Allocate buf_pool statically
Thanks to MDEV-15058, there is only one InnoDB buffer pool. Allocating buf_pool statically removes one level of pointer indirection, makes the code more readable, and removes the awkward initialization of some buf_pool members.

While doing this, we will also declare some buf_pool_t data members private and replace some functions with member functions. This mostly affects buffer pool resizing.

This is not aiming to be a complete rewrite of buf_pool_t to a proper class. Most of the buffer pool interface, such as buf_page_get_gen(), will remain in the C programming style for now.

buf_pool_t::withdrawing: Replaces buf_pool_withdrawing.
buf_pool_t::withdraw_clock_: Replaces buf_withdraw_clock.
buf_pool_t::create(): Replaces buf_pool_init().
buf_pool_t::close(): Replaces buf_pool_free().
buf_pool_t::will_be_withdrawn(): Replaces buf_block_will_be_withdrawn(), buf_frame_will_be_withdrawn().
buf_pool_t::clear_hash_index(): Replaces buf_pool_clear_hash_index().
buf_pool_t::get_n_pages(): Replaces buf_pool_get_n_pages().
buf_pool_t::validate(): Replaces buf_validate().
buf_pool_t::print(): Replaces buf_print().
buf_pool_t::block_from_ahi(): Replaces buf_block_from_ahi().
buf_pool_t::is_block_field(): Replaces buf_pointer_is_block_field().
buf_pool_t::is_block_mutex(): Replaces buf_pool_is_block_mutex().
buf_pool_t::is_block_lock(): Replaces buf_pool_is_block_lock().
buf_pool_t::is_obsolete(): Replaces buf_pool_is_obsolete().
buf_pool_t::io_buf: Make default-constructible.
buf_pool_t::io_buf::create(): Delayed 'constructor'
buf_pool_t::io_buf::close(): Early 'destructor'

HazardPointer: Make default-constructible. Define all member functions inline, also for derived classes.
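As an illustration of the shape of the change, here is a minimal, self-contained C++ sketch. It is not part of the patch: it models only the removal of the pointer indirection and the move of a free helper such as buf_pool_is_obsolete() into a member function behind a private member. The types pool_before, pool_after and the main() driver are invented for this example; the real buf_pool_t in buf0buf.h has far more members.

// Sketch: a heap-allocated singleton reached through a global pointer
// becomes a statically allocated object with member functions.
#include <cstdint>

struct stat_t { uint64_t n_page_gets= 0; };

// Before: free functions operate on a global pointer set up at startup.
struct pool_before { stat_t stat; uint64_t withdraw_clock= 0; };
static pool_before* pool_ptr;                       // allocated in an init function
static bool pool_is_obsolete(uint64_t stamp)
{ return stamp != pool_ptr->withdraw_clock; }       // extra dereference on every call

// After: one static object; the helper becomes a member function and the
// clock becomes a private member behind an accessor.
class pool_after {
public:
  stat_t stat;
  uint64_t withdraw_clock() const { return withdraw_clock_; }
  bool is_obsolete(uint64_t stamp) const { return stamp != withdraw_clock_; }
private:
  uint64_t withdraw_clock_= 0;
};
static pool_after pool;                             // no pointer indirection

int main()
{
  pool_ptr= new pool_before();
  ++pool_ptr->stat.n_page_gets;   // old style: ++buf_pool->stat.n_page_gets
  ++pool.stat.n_page_gets;        // new style: ++buf_pool.stat.n_page_gets
  bool ok= !pool.is_obsolete(pool.withdraw_clock()) && !pool_is_obsolete(0);
  delete pool_ptr;
  return ok ? 0 : 1;
}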
-rw-r--r--  storage/innobase/btr/btr0cur.cc  14
-rw-r--r--  storage/innobase/btr/btr0pcur.cc  8
-rw-r--r--  storage/innobase/btr/btr0sea.cc  110
-rw-r--r--  storage/innobase/buf/buf0buddy.cc  138
-rw-r--r--  storage/innobase/buf/buf0buf.cc  1832
-rw-r--r--  storage/innobase/buf/buf0dump.cc  16
-rw-r--r--  storage/innobase/buf/buf0flu.cc  352
-rw-r--r--  storage/innobase/buf/buf0lru.cc  415
-rw-r--r--  storage/innobase/buf/buf0rea.cc  52
-rw-r--r--  storage/innobase/gis/gis0sea.cc  4
-rw-r--r--  storage/innobase/ha/ha0ha.cc  2
-rw-r--r--  storage/innobase/handler/ha_innodb.cc  16
-rw-r--r--  storage/innobase/handler/i_s.cc  44
-rw-r--r--  storage/innobase/include/btr0sea.h  2
-rw-r--r--  storage/innobase/include/buf0buddy.h  12
-rw-r--r--  storage/innobase/include/buf0buf.h  742
-rw-r--r--  storage/innobase/include/buf0buf.ic  97
-rw-r--r--  storage/innobase/include/buf0lru.h  30
-rw-r--r--  storage/innobase/include/buf0types.h  4
-rw-r--r--  storage/innobase/include/srv0srv.h  10
-rw-r--r--  storage/innobase/lock/lock0lock.cc  6
-rw-r--r--  storage/innobase/log/log0log.cc  4
-rw-r--r--  storage/innobase/log/log0recv.cc  29
-rw-r--r--  storage/innobase/srv/srv0mon.cc  32
-rw-r--r--  storage/innobase/srv/srv0srv.cc  44
-rw-r--r--  storage/innobase/srv/srv0start.cc  9
-rw-r--r--  storage/innobase/sync/sync0debug.cc  8
-rw-r--r--  storage/innobase/trx/trx0rec.cc  5
-rw-r--r--  storage/innobase/trx/trx0undo.cc  4
29 files changed, 1873 insertions, 2168 deletions
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 55ddbdc14ee..23e5d477d9c 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -1389,7 +1389,7 @@ btr_cur_search_to_nth_level_func(
#else
info = btr_search_get_info(index);
- if (!buf_pool_is_obsolete(info->withdraw_clock)) {
+ if (!buf_pool.is_obsolete(info->withdraw_clock)) {
guess = info->root_guess;
} else {
guess = NULL;
@@ -1461,7 +1461,7 @@ btr_cur_search_to_nth_level_func(
for them, when the history list is glowing huge. */
if (lock_intention == BTR_INTENTION_DELETE
&& trx_sys.rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH
- && buf_pool->n_pend_reads) {
+ && buf_pool.n_pend_reads) {
x_latch_index:
mtr_x_lock_index(index, mtr);
} else if (index->is_spatial()
@@ -1837,7 +1837,7 @@ retry_page_get:
#ifdef BTR_CUR_ADAPT
if (block != guess) {
info->root_guess = block;
- info->withdraw_clock = buf_withdraw_clock;
+ info->withdraw_clock = buf_pool.withdraw_clock();
}
#endif
}
@@ -2590,7 +2590,7 @@ btr_cur_open_at_index_side_func(
for them, when the history list is glowing huge. */
if (lock_intention == BTR_INTENTION_DELETE
&& trx_sys.rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH
- && buf_pool->n_pend_reads) {
+ && buf_pool.n_pend_reads) {
mtr_x_lock_index(index, mtr);
} else {
mtr_sx_lock_index(index, mtr);
@@ -2917,7 +2917,7 @@ btr_cur_open_at_rnd_pos_func(
for them, when the history list is glowing huge. */
if (lock_intention == BTR_INTENTION_DELETE
&& trx_sys.rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH
- && buf_pool->n_pend_reads) {
+ && buf_pool.n_pend_reads) {
mtr_x_lock_index(index, mtr);
} else {
mtr_sx_lock_index(index, mtr);
@@ -7062,7 +7062,7 @@ btr_blob_free(
mtr_commit(mtr);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
/* Only free the block if it is still allocated to
the same file page. */
@@ -7081,7 +7081,7 @@ btr_blob_free(
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/** Helper class used while writing blob pages, during insert or update. */
diff --git a/storage/innobase/btr/btr0pcur.cc b/storage/innobase/btr/btr0pcur.cc
index fb08f0229e3..77b27b8eb26 100644
--- a/storage/innobase/btr/btr0pcur.cc
+++ b/storage/innobase/btr/btr0pcur.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2019, MariaDB Corporation.
+Copyright (c) 2016, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -202,7 +202,7 @@ before_first:
/* Function try to check if block is S/X latch. */
cursor->modify_clock = buf_block_get_modify_clock(block);
- cursor->withdraw_clock = buf_withdraw_clock;
+ cursor->withdraw_clock = buf_pool.withdraw_clock();
}
/**************************************************************//**
@@ -309,7 +309,7 @@ btr_pcur_restore_position_func(
case BTR_MODIFY_PREV:
/* Try optimistic restoration. */
- if (!buf_pool_is_obsolete(cursor->withdraw_clock)
+ if (!buf_pool.is_obsolete(cursor->withdraw_clock)
&& btr_cur_optimistic_latch_leaves(
cursor->block_when_stored, cursor->modify_clock,
&latch_mode, btr_pcur_get_btr_cur(cursor),
@@ -416,7 +416,7 @@ btr_pcur_restore_position_func(
cursor->modify_clock = buf_block_get_modify_clock(
cursor->block_when_stored);
cursor->old_stored = true;
- cursor->withdraw_clock = buf_withdraw_clock;
+ cursor->withdraw_clock = buf_pool.withdraw_clock();
mem_heap_free(heap);
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index 562a59f7628..a73543246fc 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -394,7 +394,7 @@ void btr_search_disable(bool need_mutex)
}
/* Set all block->index = NULL. */
- buf_pool_clear_hash_index();
+ buf_pool.clear_hash_index();
/* Clear the adaptive hash index. */
for (ulint i = 0; i < btr_ahi_parts; ++i) {
@@ -408,12 +408,12 @@ void btr_search_disable(bool need_mutex)
/** Enable the adaptive hash search system. */
void btr_search_enable()
{
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
if (srv_buf_pool_old_size != srv_buf_pool_size) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
btr_search_x_lock_all();
btr_search_enabled = true;
@@ -840,6 +840,82 @@ btr_search_failure(btr_search_t* info, btr_cur_t* cursor)
info->last_hash_succ = FALSE;
}
+#ifdef BTR_CUR_HASH_ADAPT
+/** Clear the adaptive hash index on all pages in the buffer pool. */
+inline void buf_pool_t::clear_hash_index()
+{
+ ut_ad(btr_search_own_all(RW_LOCK_X));
+ ut_ad(!resizing);
+ ut_ad(!btr_search_enabled);
+
+ for (chunk_t *chunk= chunks + n_chunks; --chunk != chunks; )
+ {
+ for (buf_block_t *block= chunk->blocks, * const end= block + chunk->size;
+ block != end; block++)
+ {
+ dict_index_t *index= block->index;
+ assert_block_ahi_valid(block);
+
+ /* We can clear block->index block->n_pointers when
+ btr_search_own_all(RW_LOCK_X); see the comments in buf0buf.h */
+ if (!index)
+ {
+# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+ ut_a(!block->n_pointers);
+# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+ continue;
+ }
+
+ ut_d(buf_page_state state= buf_block_get_state(block));
+ /* Another thread may have set the state to
+ BUF_BLOCK_REMOVE_HASH in buf_LRU_block_remove_hashed().
+
+ The state change in buf_pool_t::realloc() is not observable
+ here, because in that case we would have !block->index.
+
+ In the end, the entire adaptive hash index will be removed. */
+ ut_ad(state == BUF_BLOCK_FILE_PAGE || state == BUF_BLOCK_REMOVE_HASH);
+# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+ block->n_pointers= 0;
+# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+ block->index= nullptr;
+ }
+ }
+}
+#endif /* BTR_CUR_HASH_ADAPT */
+
+/** Get a buffer block from an adaptive hash index pointer.
+This function does not return if the block is not identified.
+@param ptr pointer to within a page frame
+@return pointer to block, never NULL */
+inline buf_block_t* buf_pool_t::block_from_ahi(const byte *ptr) const
+{
+ chunk_t::map *chunk_map = chunk_t::map_ref;
+ ut_ad(chunk_t::map_ref == chunk_t::map_reg);
+ ut_ad(!resizing);
+
+ chunk_t::map::const_iterator it= chunk_map->upper_bound(ptr);
+ ut_a(it != chunk_map->begin());
+
+ chunk_t *chunk= it == chunk_map->end()
+ ? chunk_map->rbegin()->second
+ : (--it)->second;
+
+ const size_t offs= size_t(ptr - chunk->blocks->frame) >> srv_page_size_shift;
+ ut_a(offs < chunk->size);
+
+ buf_block_t *block= &chunk->blocks[offs];
+ /* buf_pool_t::chunk_t::init() invokes buf_block_init() so that
+ block[n].frame == block->frame + n * srv_page_size. Check it. */
+ ut_ad(block->frame == page_align(ptr));
+ /* Read the state of the block without holding a mutex.
+ A state transition from BUF_BLOCK_FILE_PAGE to
+ BUF_BLOCK_REMOVE_HASH is possible during this execution. */
+ ut_d(const buf_page_state state = buf_block_get_state(block));
+ ut_ad(state == BUF_BLOCK_FILE_PAGE || state == BUF_BLOCK_REMOVE_HASH);
+ return block;
+}
+
/** Tries to guess the right search position based on the hash search info
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
@@ -944,7 +1020,7 @@ fail:
return(FALSE);
}
- buf_block_t* block = buf_block_from_ahi(rec);
+ buf_block_t* block = buf_pool.block_from_ahi(rec);
if (use_latch) {
mutex_enter(&block->mutex);
@@ -983,7 +1059,7 @@ got_no_latch:
}
mtr->memo_push(block, fix_type);
- buf_pool->stat.n_page_gets++;
+ buf_pool.stat.n_page_gets++;
rw_lock_s_unlock(use_latch);
@@ -1074,7 +1150,7 @@ got_no_latch:
#endif
/* Increment the page get statistics though we did not really
fix the page: for user info only */
- ++buf_pool->stat.n_page_gets;
+ ++buf_pool.stat.n_page_gets;
if (!ahi_latch) {
buf_page_make_young_if_needed(&block->page);
@@ -1087,7 +1163,7 @@ got_no_latch:
@param[in,out] block block containing index page, s- or x-latched, or an
index page for which we know that
block->buf_fix_count == 0 or it is an index page which
- has already been removed from the buf_pool->page_hash
+ has already been removed from the buf_pool.page_hash
i.e.: it is in state BUF_BLOCK_REMOVE_HASH */
void btr_search_drop_page_hash_index(buf_block_t* block)
{
@@ -1112,7 +1188,7 @@ retry:
not in the adaptive hash index. */
index = block->index;
/* This debug check uses a dirty read that could theoretically cause
- false positives while buf_pool_clear_hash_index() is executing. */
+ false positives while buf_pool.clear_hash_index() is executing. */
assert_block_ahi_valid(block);
ut_ad(!btr_search_own_any(RW_LOCK_S));
ut_ad(!btr_search_own_any(RW_LOCK_X));
@@ -1990,7 +2066,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
rec_offs_init(offsets_);
btr_search_x_lock_all();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
cell_count = hash_get_n_cells(
btr_search_sys->hash_tables[hash_table_id]);
@@ -2000,13 +2076,13 @@ btr_search_hash_table_validate(ulint hash_table_id)
give other queries a chance to run. */
if ((i != 0) && ((i % chunk_size) == 0)) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
btr_search_x_unlock_all();
os_thread_yield();
btr_search_x_lock_all();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
ulint curr_cell_count = hash_get_n_cells(
btr_search_sys->hash_tables[hash_table_id]);
@@ -2026,7 +2102,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
for (; node != NULL; node = node->next) {
const buf_block_t* block
- = buf_block_from_ahi((byte*) node->data);
+ = buf_pool.block_from_ahi((byte*) node->data);
const buf_block_t* hash_block;
index_id_t page_index_id;
@@ -2050,7 +2126,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
/* When a block is being freed,
buf_LRU_search_and_free_block() first
removes the block from
- buf_pool->page_hash by calling
+ buf_pool.page_hash by calling
buf_LRU_block_remove_hashed_page().
After that, it invokes
btr_search_drop_page_hash_index() to
@@ -2112,13 +2188,13 @@ btr_search_hash_table_validate(ulint hash_table_id)
/* We release search latches every once in a while to
give other queries a chance to run. */
if (i != 0) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
btr_search_x_unlock_all();
os_thread_yield();
btr_search_x_lock_all();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
ulint curr_cell_count = hash_get_n_cells(
btr_search_sys->hash_tables[hash_table_id]);
@@ -2141,7 +2217,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
btr_search_x_unlock_all();
if (UNIV_LIKELY_NULL(heap)) {
diff --git a/storage/innobase/buf/buf0buddy.cc b/storage/innobase/buf/buf0buddy.cc
index f660ff508c1..a78974992b0 100644
--- a/storage/innobase/buf/buf0buddy.cc
+++ b/storage/innobase/buf/buf0buddy.cc
@@ -184,26 +184,26 @@ struct CheckZipFree {
@param[in] i buddy size to validate */
static void buf_buddy_list_validate(ulint i)
{
- ut_list_validate(buf_pool->zip_free[i], CheckZipFree(i));
+ ut_list_validate(buf_pool.zip_free[i], CheckZipFree(i));
}
/**********************************************************************//**
Debug function to validate that a buffer is indeed free i.e.: in the
zip_free[].
@param[in] buf block to check
-@param[in] i index of buf_pool->zip_free[]
+@param[in] i index of buf_pool.zip_free[]
@return true if free */
static bool buf_buddy_check_free(const buf_buddy_free_t* buf, ulint i)
{
const ulint size = BUF_BUDDY_LOW << i;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(!ut_align_offset(buf, size));
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
buf_buddy_free_t* itr;
- for (itr = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
+ for (itr = UT_LIST_GET_FIRST(buf_pool.zip_free[i]);
itr && itr != buf;
itr = UT_LIST_GET_NEXT(list, itr)) {
}
@@ -223,7 +223,7 @@ buf_buddy_is_free(
/*==============*/
buf_buddy_free_t* buf, /*!< in: block to check */
ulint i) /*!< in: index of
- buf_pool->zip_free[] */
+ buf_pool.zip_free[] */
{
#ifdef UNIV_DEBUG
const ulint size = BUF_BUDDY_LOW << i;
@@ -261,54 +261,54 @@ buf_buddy_is_free(
/** Add a block to the head of the appropriate buddy free list.
@param[in,out] buf block to be freed
-@param[in] i index of buf_pool->zip_free[] */
+@param[in] i index of buf_pool.zip_free[] */
UNIV_INLINE
void
buf_buddy_add_to_free(buf_buddy_free_t* buf, ulint i)
{
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(buf_pool->zip_free[i].start != buf);
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(buf_pool.zip_free[i].start != buf);
buf_buddy_stamp_free(buf, i);
- UT_LIST_ADD_FIRST(buf_pool->zip_free[i], buf);
+ UT_LIST_ADD_FIRST(buf_pool.zip_free[i], buf);
ut_d(buf_buddy_list_validate(i));
}
/** Remove a block from the appropriate buddy free list.
@param[in,out] buf block to be freed
-@param[in] i index of buf_pool->zip_free[] */
+@param[in] i index of buf_pool.zip_free[] */
UNIV_INLINE
void
buf_buddy_remove_from_free(buf_buddy_free_t* buf, ulint i)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_buddy_check_free(buf, i));
- UT_LIST_REMOVE(buf_pool->zip_free[i], buf);
+ UT_LIST_REMOVE(buf_pool.zip_free[i], buf);
buf_buddy_stamp_nonfree(buf, i);
}
-/** Try to allocate a block from buf_pool->zip_free[].
-@param[in] i index of buf_pool->zip_free[]
-@return allocated block, or NULL if buf_pool->zip_free[] was empty */
+/** Try to allocate a block from buf_pool.zip_free[].
+@param[in] i index of buf_pool.zip_free[]
+@return allocated block, or NULL if buf_pool.zip_free[] was empty */
static buf_buddy_free_t* buf_buddy_alloc_zip(ulint i)
{
buf_buddy_free_t* buf;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_a(i < BUF_BUDDY_SIZES);
ut_a(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
ut_d(buf_buddy_list_validate(i));
- buf = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
+ buf = UT_LIST_GET_FIRST(buf_pool.zip_free[i]);
- if (buf_pool->curr_size < buf_pool->old_size
- && UT_LIST_GET_LEN(buf_pool->withdraw)
- < buf_pool->withdraw_target) {
+ if (buf_pool.curr_size < buf_pool.old_size
+ && UT_LIST_GET_LEN(buf_pool.withdraw)
+ < buf_pool.withdraw_target) {
while (buf != NULL
- && buf_frame_will_be_withdrawn(
+ && buf_pool.will_be_withdrawn(
reinterpret_cast<byte*>(buf))) {
/* This should be withdrawn, not to be allocated */
buf = UT_LIST_GET_NEXT(list, buf);
@@ -326,7 +326,7 @@ static buf_buddy_free_t* buf_buddy_alloc_zip(ulint i)
reinterpret_cast<buf_buddy_free_t*>(
buf->stamp.bytes
+ (BUF_BUDDY_LOW << i));
- ut_ad(!buf_pool_contains_zip(buddy));
+ ut_ad(!buf_pool.contains_zip(buddy));
buf_buddy_add_to_free(buddy, i);
}
}
@@ -356,11 +356,11 @@ buf_buddy_block_free(void* buf)
buf_page_t* bpage;
buf_block_t* block;
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_a(!ut_align_offset(buf, srv_page_size));
- HASH_SEARCH(hash, buf_pool->zip_hash, fold, buf_page_t*, bpage,
+ HASH_SEARCH(hash, buf_pool.zip_hash, fold, buf_page_t*, bpage,
ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_MEMORY
&& bpage->in_zip_hash && !bpage->in_page_hash),
((buf_block_t*) bpage)->frame == buf);
@@ -369,7 +369,7 @@ buf_buddy_block_free(void* buf)
ut_ad(!bpage->in_page_hash);
ut_ad(bpage->in_zip_hash);
ut_d(bpage->in_zip_hash = FALSE);
- HASH_DELETE(buf_page_t, hash, buf_pool->zip_hash, fold, bpage);
+ HASH_DELETE(buf_page_t, hash, buf_pool.zip_hash, fold, bpage);
ut_d(memset(buf, 0, srv_page_size));
UNIV_MEM_INVALID(buf, srv_page_size);
@@ -379,8 +379,8 @@ buf_buddy_block_free(void* buf)
buf_LRU_block_free_non_file_page(block);
buf_page_mutex_exit(block);
- ut_ad(buf_pool->buddy_n_frames > 0);
- ut_d(buf_pool->buddy_n_frames--);
+ ut_ad(buf_pool.buddy_n_frames > 0);
+ ut_d(buf_pool.buddy_n_frames--);
}
/**********************************************************************//**
@@ -392,8 +392,8 @@ buf_buddy_block_register(
buf_block_t* block) /*!< in: buffer frame to allocate */
{
const ulint fold = BUF_POOL_ZIP_FOLD(block);
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_READY_FOR_USE);
buf_block_set_state(block, BUF_BLOCK_MEMORY);
@@ -404,15 +404,15 @@ buf_buddy_block_register(
ut_ad(!block->page.in_page_hash);
ut_ad(!block->page.in_zip_hash);
ut_d(block->page.in_zip_hash = TRUE);
- HASH_INSERT(buf_page_t, hash, buf_pool->zip_hash, fold, &block->page);
+ HASH_INSERT(buf_page_t, hash, buf_pool.zip_hash, fold, &block->page);
- ut_d(buf_pool->buddy_n_frames++);
+ ut_d(buf_pool.buddy_n_frames++);
}
/** Allocate a block from a bigger object.
@param[in] buf a block that is free to use
-@param[in] i index of buf_pool->zip_free[]
-@param[in] j size of buf as an index of buf_pool->zip_free[]
+@param[in] i index of buf_pool.zip_free[]
+@param[in] j size of buf as an index of buf_pool.zip_free[]
@return allocated block */
static
void*
@@ -441,15 +441,15 @@ buf_buddy_alloc_from(void* buf, ulint i, ulint j)
}
/** Allocate a block.
-@param[in] i index of buf_pool->zip_free[] or BUF_BUDDY_SIZES
-@param[out] lru whether buf_pool->mutex was temporarily released
+@param[in] i index of buf_pool.zip_free[] or BUF_BUDDY_SIZES
+@param[out] lru whether buf_pool.mutex was temporarily released
@return allocated block, never NULL */
byte *buf_buddy_alloc_low(ulint i, bool *lru)
{
buf_block_t* block;
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
if (i < BUF_BUDDY_SIZES) {
@@ -461,7 +461,7 @@ byte *buf_buddy_alloc_low(ulint i, bool *lru)
}
}
- /* Try allocating from the buf_pool->free list. */
+ /* Try allocating from the buf_pool.free list. */
block = buf_LRU_get_free_only();
if (block) {
@@ -469,9 +469,9 @@ byte *buf_buddy_alloc_low(ulint i, bool *lru)
}
/* Try replacing an uncompressed page in the buffer pool. */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
block = buf_LRU_get_free_block();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
if (lru) {
*lru = true;
}
@@ -483,7 +483,7 @@ alloc_big:
block->frame, i, BUF_BUDDY_SIZES);
func_exit:
- buf_pool->buddy_stat[i].used++;
+ buf_pool.buddy_stat[i].used++;
return reinterpret_cast<byte*>(block);
}
@@ -491,7 +491,7 @@ func_exit:
function will release and lock it again.
@param[in] src block to relocate
@param[in] dst free block to relocated to
-@param[in] i index of buf_pool->zip_free[]
+@param[in] i index of buf_pool.zip_free[]
@param[in] force true if we must relocated always
@return true if relocated */
static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
@@ -501,8 +501,8 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
ulint space;
ulint offset;
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(!ut_align_offset(src, size));
ut_ad(!ut_align_offset(dst, size));
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
@@ -531,7 +531,7 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
if (!bpage || bpage->zip.data != src) {
/* The block has probably been freshly
allocated by buf_LRU_get_free_block() but not
- added to buf_pool->page_hash yet. Obviously,
+ added to buf_pool.page_hash yet. Obviously,
it cannot be relocated. */
rw_lock_x_unlock(hash_lock);
@@ -543,7 +543,7 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
/* It might be just uninitialized page.
We should search from LRU list also. */
- bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+ bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
while (bpage != NULL) {
if (bpage->zip.data == src) {
hash_lock = buf_page_hash_lock_get(bpage->id);
@@ -593,7 +593,7 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
buf_buddy_mem_invalid(
reinterpret_cast<buf_buddy_free_t*>(src), i);
- buf_buddy_stat_t* buddy_stat = &buf_pool->buddy_stat[i];
+ buf_buddy_stat_t* buddy_stat = &buf_pool.buddy_stat[i];
buddy_stat->relocated++;
buddy_stat->relocated_usec+= (my_interval_timer() - ns) / 1000;
return(true);
@@ -608,18 +608,18 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
/** Deallocate a block.
@param[in] buf block to be freed, must not be pointed to
by the buffer pool
-@param[in] i index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */
+@param[in] i index of buf_pool.zip_free[], or BUF_BUDDY_SIZES */
void buf_buddy_free_low(void* buf, ulint i)
{
buf_buddy_free_t* buddy;
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(i <= BUF_BUDDY_SIZES);
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
- ut_ad(buf_pool->buddy_stat[i].used > 0);
+ ut_ad(buf_pool.buddy_stat[i].used > 0);
- buf_pool->buddy_stat[i].used--;
+ buf_pool.buddy_stat[i].used--;
recombine:
UNIV_MEM_ALLOC(buf, BUF_BUDDY_LOW << i);
@@ -630,13 +630,13 @@ recombine:
ut_ad(i < BUF_BUDDY_SIZES);
ut_ad(buf == ut_align_down(buf, BUF_BUDDY_LOW << i));
- ut_ad(!buf_pool_contains_zip(buf));
+ ut_ad(!buf_pool.contains_zip(buf));
/* Do not recombine blocks if there are few free blocks.
We may waste up to 15360*max_len bytes to free blocks
(1024 + 2048 + 4096 + 8192 = 15360) */
- if (UT_LIST_GET_LEN(buf_pool->zip_free[i]) < 16
- && buf_pool->curr_size >= buf_pool->old_size) {
+ if (UT_LIST_GET_LEN(buf_pool.zip_free[i]) < 16
+ && buf_pool.curr_size >= buf_pool.old_size) {
goto func_exit;
}
@@ -650,7 +650,7 @@ recombine:
/* The buddy is free: recombine */
buf_buddy_remove_from_free(buddy, i);
buddy_is_free:
- ut_ad(!buf_pool_contains_zip(buddy));
+ ut_ad(!buf_pool.contains_zip(buddy));
i++;
buf = ut_align_down(buf, BUF_BUDDY_LOW << i);
@@ -662,7 +662,7 @@ buddy_is_free:
/* The buddy is not free. Is there a free block of
this size? */
if (buf_buddy_free_t* zip_buf =
- UT_LIST_GET_FIRST(buf_pool->zip_free[i])) {
+ UT_LIST_GET_FIRST(buf_pool.zip_free[i])) {
/* Remove the block from the free list, because
a successful buf_buddy_relocate() will overwrite
@@ -700,8 +700,8 @@ buf_buddy_realloc(void* buf, ulint size)
buf_block_t* block = NULL;
ulint i = buf_buddy_get_slot(size);
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(i <= BUF_BUDDY_SIZES);
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
@@ -711,7 +711,7 @@ buf_buddy_realloc(void* buf, ulint size)
}
if (block == NULL) {
- /* Try allocating from the buf_pool->free list. */
+ /* Try allocating from the buf_pool.free list. */
block = buf_LRU_get_free_only();
if (block == NULL) {
@@ -725,7 +725,7 @@ buf_buddy_realloc(void* buf, ulint size)
block->frame, i, BUF_BUDDY_SIZES));
}
- buf_pool->buddy_stat[i].used++;
+ buf_pool.buddy_stat[i].used++;
/* Try to relocate the buddy of buf to the free block. */
if (buf_buddy_relocate(buf, block, i, true)) {
@@ -742,16 +742,16 @@ buf_buddy_realloc(void* buf, ulint size)
/** Combine all pairs of free buddies. */
void buf_buddy_condense_free()
{
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(buf_pool->curr_size < buf_pool->old_size);
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(buf_pool.curr_size < buf_pool.old_size);
- for (ulint i = 0; i < UT_ARR_SIZE(buf_pool->zip_free); ++i) {
+ for (ulint i = 0; i < UT_ARR_SIZE(buf_pool.zip_free); ++i) {
buf_buddy_free_t* buf =
- UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
+ UT_LIST_GET_FIRST(buf_pool.zip_free[i]);
/* seek to withdraw target */
while (buf != NULL
- && !buf_frame_will_be_withdrawn(
+ && !buf_pool.will_be_withdrawn(
reinterpret_cast<byte*>(buf))) {
buf = UT_LIST_GET_NEXT(list, buf);
}
@@ -769,7 +769,7 @@ void buf_buddy_condense_free()
/* seek to the next withdraw target */
while (true) {
while (next != NULL
- && !buf_frame_will_be_withdrawn(
+ && !buf_pool.will_be_withdrawn(
reinterpret_cast<byte*>(next))) {
next = UT_LIST_GET_NEXT(list, next);
}
@@ -786,7 +786,7 @@ void buf_buddy_condense_free()
/* Both buf and buddy are free.
Try to combine them. */
buf_buddy_remove_from_free(buf, i);
- buf_pool->buddy_stat[i].used++;
+ buf_pool.buddy_stat[i].used++;
buf_buddy_free_low(buf, i);
}
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 5625e0ab2cc..67f49f43207 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -116,21 +116,6 @@ struct set_numa_interleave_t
IMPLEMENTATION OF THE BUFFER POOL
=================================
-Performance improvement:
-------------------------
-Thread scheduling in NT may be so slow that the OS wait mechanism should
-not be used even in waiting for disk reads to complete.
-Rather, we should put waiting query threads to the queue of
-waiting jobs, and let the OS thread do something useful while the i/o
-is processed. In this way we could remove most OS thread switches in
-an i/o-intensive benchmark like TPC-C.
-
-A possibility is to put a user space thread library between the database
-and NT. User space thread libraries might be very fast.
-
-SQL Server 7.0 can be configured to use 'fibers' which are lightweight
-threads in NT. These should be studied.
-
Buffer frames and blocks
------------------------
Following the terminology of Gray and Reuter, we call the memory
@@ -144,21 +129,21 @@ in the file along with the file page, resides in the control block.
The buffer buf_pool contains a single mutex which protects all the
control data structures of the buf_pool. The content of a buffer frame is
protected by a separate read-write lock in its control block, though.
-These locks can be locked and unlocked without owning the buf_pool->mutex.
+These locks can be locked and unlocked without owning the buf_pool.mutex.
The OS events in the buf_pool struct can be waited for without owning the
-buf_pool->mutex.
+buf_pool.mutex.
-The buf_pool->mutex is a hot-spot in main memory, causing a lot of
+The buf_pool.mutex is a hot-spot in main memory, causing a lot of
memory bus traffic on multiprocessor systems when processors
alternately access the mutex. On our Pentium, the mutex is accessed
maybe every 10 microseconds. We gave up the solution to have mutexes
for each control block, for instance, because it seemed to be
complicated.
-A solution to reduce mutex contention of the buf_pool->mutex is to
+A solution to reduce mutex contention of the buf_pool.mutex is to
create a separate mutex for the page hash table. On Pentium,
accessing the hash table takes 2 microseconds, about half
-of the total buf_pool->mutex hold time.
+of the total buf_pool.mutex hold time.
Control blocks
--------------
@@ -202,7 +187,7 @@ in the database, using tables whose size is a power of 2.
There are several lists of control blocks.
-The free list (buf_pool->free) contains blocks which are currently not
+The free list (buf_pool.free) contains blocks which are currently not
used.
The common LRU list contains all the blocks holding a file page
@@ -228,20 +213,20 @@ holds. The blocks in unzip_LRU will be in same order as they are in
the common LRU list. That is, each manipulation of the common LRU
list will result in the same manipulation of the unzip_LRU list.
-The chain of modified blocks (buf_pool->flush_list) contains the blocks
+The chain of modified blocks (buf_pool.flush_list) contains the blocks
holding file pages that have been modified in the memory
but not written to disk yet. The block with the oldest modification
which has not yet been written to disk is at the end of the chain.
-The access to this list is protected by buf_pool->flush_list_mutex.
+The access to this list is protected by buf_pool.flush_list_mutex.
-The chain of unmodified compressed blocks (buf_pool->zip_clean)
+The chain of unmodified compressed blocks (buf_pool.zip_clean)
contains the control blocks (buf_page_t) of those compressed pages
-that are not in buf_pool->flush_list and for which no uncompressed
+that are not in buf_pool.flush_list and for which no uncompressed
page has been allocated in the buffer pool. The control blocks for
uncompressed pages are accessible via buf_block_t objects that are
-reachable via buf_pool->chunks[].
+reachable via buf_pool.chunks[].
-The chains of free memory blocks (buf_pool->zip_free[]) are used by
+The chains of free memory blocks (buf_pool.zip_free[]) are used by
the buddy allocator (buf0buddy.cc) to keep track of currently unused
memory blocks of size sizeof(buf_page_t)..srv_page_size / 2. These
blocks are inside the srv_page_size-sized memory blocks of type
@@ -312,35 +297,10 @@ static const ulint BUF_READ_AHEAD_PAGES = 64;
read-ahead buffer. (Divide buf_pool size by this amount) */
static const ulint BUF_READ_AHEAD_PORTION = 32;
-/** The buffer pool of the database */
-buf_pool_t* buf_pool;
-
-/** true when resizing buffer pool is in the critical path. */
-volatile bool buf_pool_resizing;
-
-/** true when withdrawing buffer pool pages might cause page relocation */
-volatile bool buf_pool_withdrawing;
-
-/** the clock is incremented every time a pointer to a page may become obsolete;
-if the withdrwa clock has not changed, the pointer is still valid in buffer
-pool. if changed, the pointer might not be in buffer pool any more. */
-volatile ulint buf_withdraw_clock;
-
-/** Map of buffer pool chunks by its first frame address
-This is newly made by initialization of buffer pool and buf_resize_thread.
-Currently, no need mutex protection for update. */
-typedef std::map<
- const byte*,
- buf_chunk_t*,
- std::less<const byte*>,
- ut_allocator<std::pair<const byte* const, buf_chunk_t*> > >
- buf_pool_chunk_map_t;
-
-static buf_pool_chunk_map_t* buf_chunk_map_reg;
-
-/** Chunk map to be used to lookup.
-The map pointed by this should not be updated */
-static buf_pool_chunk_map_t* buf_chunk_map_ref = NULL;
+/** The InnoDB buffer pool */
+buf_pool_t buf_pool;
+buf_pool_t::chunk_t::map *buf_pool_t::chunk_t::map_reg;
+buf_pool_t::chunk_t::map *buf_pool_t::chunk_t::map_ref;
#ifdef UNIV_DEBUG
/** Disable resizing buffer pool to make assertion code not expensive. */
@@ -379,17 +339,6 @@ on the io_type */
: (counter##_WRITTEN))
-/** Registers a chunk to buf_pool_chunk_map
-@param[in] chunk chunk of buffers */
-static
-void
-buf_pool_register_chunk(
- buf_chunk_t* chunk)
-{
- buf_chunk_map_reg->insert(buf_pool_chunk_map_t::value_type(
- chunk->blocks->frame, chunk));
-}
-
/** Decrypt a page for temporary tablespace.
@param[in,out] tmp_frame Temporary buffer
@param[in] src_frame Page to decrypt
@@ -452,7 +401,7 @@ static bool buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space)
if (space->purpose == FIL_TYPE_TEMPORARY
&& innodb_encrypt_temporary_tables) {
- buf_tmp_buffer_t* slot = buf_pool->io_buf.reserve();
+ buf_tmp_buffer_t* slot = buf_pool.io_buf_reserve();
ut_a(slot);
slot->allocate();
@@ -483,7 +432,7 @@ decompress:
return false;
}
- slot = buf_pool->io_buf.reserve();
+ slot = buf_pool.io_buf_reserve();
ut_a(slot);
slot->allocate();
@@ -514,7 +463,7 @@ decrypt_failed:
return false;
}
- slot = buf_pool->io_buf.reserve();
+ slot = buf_pool.io_buf_reserve();
ut_a(slot);
slot->allocate();
ut_d(fil_page_type_validate(space, dst_frame));
@@ -547,7 +496,7 @@ decrypt_failed:
lsn_t
buf_pool_get_oldest_modification()
{
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
buf_page_t* bpage;
@@ -555,14 +504,14 @@ buf_pool_get_oldest_modification()
list. We would only need to write out temporary pages if the
page is about to be evicted from the buffer pool, and the page
contents is still needed (the page has not been freed). */
- for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
+ for (bpage = UT_LIST_GET_LAST(buf_pool.flush_list);
bpage != NULL && fsp_is_system_temporary(bpage->id.space());
bpage = UT_LIST_GET_PREV(list, bpage)) {
ut_ad(bpage->in_flush_list);
}
lsn_t oldest_lsn = bpage ? bpage->oldest_modification : 0;
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
/* The returned answer may be out of date: the flush_list can
change after the mutex has been released. */
@@ -1102,14 +1051,14 @@ buf_madvise_do_dump()
ret+= madvise(recv_sys.buf, recv_sys.len, MADV_DODUMP);
}
- mutex_enter(&buf_pool->mutex);
- buf_chunk_t* chunk = buf_pool->chunks;
+ mutex_enter(&buf_pool.mutex);
+ auto chunk = buf_pool.chunks;
- for (ulint n = buf_pool->n_chunks; n--; chunk++) {
+ for (ulint n = buf_pool.n_chunks; n--; chunk++) {
ret+= madvise(chunk->mem, chunk->mem_size(), MADV_DODUMP);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return ret;
}
#endif
@@ -1294,7 +1243,7 @@ static
void
pfs_register_buffer_block(
/*======================*/
- buf_chunk_t* chunk) /*!< in/out: chunk of buffers */
+ buf_pool_t::chunk_t* chunk) /*!< in/out: chunk of buffers */
{
buf_block_t* block;
ulint num_to_register;
@@ -1346,7 +1295,7 @@ buf_block_init(buf_block_t* block, byte* frame)
UNIV_MEM_DESC(frame, srv_page_size);
/* This function should only be executed at database startup or by
- buf_pool_resize(). Either way, adaptive hash index must not exist. */
+ buf_pool.resize(). Either way, adaptive hash index must not exist. */
assert_block_ahi_empty_on_init(block);
block->frame = frame;
@@ -1407,210 +1356,138 @@ buf_block_init(buf_block_t* block, byte* frame)
}
/** Allocate a chunk of buffer frames.
-@param[in,out] chunk chunk of buffers
-@param[in] mem_size requested size in bytes
-@return chunk
-@retval NULL on failure */
-static buf_chunk_t* buf_chunk_init(buf_chunk_t* chunk, ulint mem_size)
+@param bytes requested size
+@return whether the allocation succeeded */
+inline bool buf_pool_t::chunk_t::create(size_t bytes)
{
- buf_block_t* block;
- byte* frame;
- ulint i;
+ /* Round down to a multiple of page size, although it already should be. */
+ bytes= ut_2pow_round<size_t>(bytes, srv_page_size);
- /* Round down to a multiple of page size,
- although it already should be. */
- mem_size = ut_2pow_round<ulint>(mem_size, srv_page_size);
+ mem= buf_pool.allocator.allocate_large_dontdump(bytes, &mem_pfx);
- DBUG_EXECUTE_IF("ib_buf_chunk_init_fails", return(NULL););
-
- chunk->mem = buf_pool->allocator.allocate_large_dontdump(mem_size, &chunk->mem_pfx);
-
- if (UNIV_UNLIKELY(chunk->mem == NULL)) {
-
- return(NULL);
- }
+ if (UNIV_UNLIKELY(!mem))
+ return false;
#ifdef HAVE_LIBNUMA
- if (srv_numa_interleave) {
- struct bitmask *numa_mems_allowed = numa_get_mems_allowed();
- if (mbind(chunk->mem, chunk->mem_size(),
- MPOL_INTERLEAVE,
- numa_mems_allowed->maskp,
- numa_mems_allowed->size,
- MPOL_MF_MOVE)) {
- ib::warn() << "Failed to set NUMA memory policy of"
- " buffer pool page frames to MPOL_INTERLEAVE"
- " (error: " << strerror(errno) << ").";
- }
- numa_bitmask_free(numa_mems_allowed);
- }
+ if (srv_numa_interleave)
+ {
+ struct bitmask *numa_mems_allowed= numa_get_mems_allowed();
+ if (mbind(mem, mem_size(), MPOL_INTERLEAVE,
+ numa_mems_allowed->maskp, numa_mems_allowed->size,
+ MPOL_MF_MOVE))
+ {
+ ib::warn() << "Failed to set NUMA memory policy of"
+ " buffer pool page frames to MPOL_INTERLEAVE"
+ " (error: " << strerror(errno) << ").";
+ }
+ numa_bitmask_free(numa_mems_allowed);
+ }
#endif /* HAVE_LIBNUMA */
- /* Allocate the block descriptors from
- the start of the memory block. */
- chunk->blocks = (buf_block_t*) chunk->mem;
-
- /* Align a pointer to the first frame. Note that when
- opt_large_page_size is smaller than srv_page_size,
- we may allocate one fewer block than requested. When
- it is bigger, we may allocate more blocks than requested. */
- static_assert(sizeof(byte*) == sizeof(ulint), "pointer size");
-
- frame = reinterpret_cast<byte*>((reinterpret_cast<ulint>(chunk->mem)
- + srv_page_size - 1)
- & ~ulint{srv_page_size - 1});
- chunk->size = (chunk->mem_pfx.m_size >> srv_page_size_shift)
- - (frame != chunk->mem);
-
- /* Subtract the space needed for block descriptors. */
- {
- ulint size = chunk->size;
+ /* Allocate the block descriptors from
+ the start of the memory block. */
+ blocks= reinterpret_cast<buf_block_t*>(mem);
- while (frame < (byte*) (chunk->blocks + size)) {
- frame += srv_page_size;
- size--;
- }
+ /* Align a pointer to the first frame. Note that when
+ opt_large_page_size is smaller than srv_page_size,
+ we may allocate one fewer block than requested. When
+ it is bigger, we may allocate more blocks than requested. */
+ static_assert(sizeof(byte*) == sizeof(ulint), "pointer size");
- chunk->size = size;
- }
+ byte *frame= reinterpret_cast<byte*>((reinterpret_cast<ulint>(mem) +
+ srv_page_size - 1) &
+ ~ulint{srv_page_size - 1});
+ size= (mem_pfx.m_size >> srv_page_size_shift) - (frame != mem);
- /* Init block structs and assign frames for them. Then we
- assign the frames to the first blocks (we already mapped the
- memory above). */
+ /* Subtract the space needed for block descriptors. */
+ {
+ ulint s= size;
- block = chunk->blocks;
+ while (frame < reinterpret_cast<const byte*>(blocks + s))
+ {
+ frame+= srv_page_size;
+ s--;
+ }
- for (i = chunk->size; i--; ) {
+ size= s;
+ }
- buf_block_init(block, frame);
- UNIV_MEM_INVALID(block->frame, srv_page_size);
+ /* Init block structs and assign frames for them. Then we assign the
+ frames to the first blocks (we already mapped the memory above). */
- /* Add the block to the free list */
- UT_LIST_ADD_LAST(buf_pool->free, &block->page);
+ buf_block_t *block= blocks;
- ut_d(block->page.in_free_list = TRUE);
+ for (auto i= size; i--; ) {
+ buf_block_init(block, frame);
+ UNIV_MEM_INVALID(block->frame, srv_page_size);
+ /* Add the block to the free list */
+ UT_LIST_ADD_LAST(buf_pool.free, &block->page);
- block++;
- frame += srv_page_size;
- }
+ ut_d(block->page.in_free_list = TRUE);
+ block++;
+ frame+= srv_page_size;
+ }
- buf_pool_register_chunk(chunk);
+ reg();
#ifdef PFS_GROUP_BUFFER_SYNC
- pfs_register_buffer_block(chunk);
+ pfs_register_buffer_block(this);
#endif /* PFS_GROUP_BUFFER_SYNC */
- return(chunk);
+ return true;
}
#ifdef UNIV_DEBUG
-/*********************************************************************//**
-Finds a block in the given buffer chunk that points to a
-given compressed page.
-@return buffer block pointing to the compressed page, or NULL */
-static
-buf_block_t*
-buf_chunk_contains_zip(
-/*===================*/
- buf_chunk_t* chunk, /*!< in: chunk being checked */
- const void* data) /*!< in: pointer to compressed page */
-{
- buf_block_t* block;
- ulint i;
-
- block = chunk->blocks;
-
- for (i = chunk->size; i--; block++) {
- if (block->page.zip.data == data) {
-
- return(block);
- }
- }
-
- return(NULL);
-}
-
-/** Finds a block in the buffer pool that points to a given compressed page.
-@param[in] data pointer to compressed page
-@return buffer block pointing to the compressed page
-@retval NULL if not found */
-buf_block_t* buf_pool_contains_zip(const void* data)
+/** Check that all file pages in the buffer chunk are in a replaceable state.
+@return address of a non-free block
+@retval nullptr if all freed */
+inline const buf_block_t *buf_pool_t::chunk_t::not_freed() const
{
- buf_chunk_t* chunk = buf_pool->chunks;
-
- ut_ad(mutex_own(&buf_pool->mutex));
- for (ulint n = buf_pool->n_chunks; n--; chunk++) {
- if (buf_block_t* block = buf_chunk_contains_zip(chunk, data)) {
- return(block);
- }
- }
+ buf_block_t *block= blocks;
+ for (auto i= size; i--; block++)
+ {
+ switch (buf_block_get_state(block)) {
+ case BUF_BLOCK_POOL_WATCH:
+ case BUF_BLOCK_ZIP_PAGE:
+ case BUF_BLOCK_ZIP_DIRTY:
+ /* The uncompressed buffer pool should never
+ contain ROW_FORMAT=COMPRESSED block descriptors. */
+ ut_error;
+ break;
+ case BUF_BLOCK_NOT_USED:
+ case BUF_BLOCK_READY_FOR_USE:
+ case BUF_BLOCK_MEMORY:
+ case BUF_BLOCK_REMOVE_HASH:
+ /* Skip blocks that are not being used for file pages. */
+ break;
+ case BUF_BLOCK_FILE_PAGE:
+ if (srv_read_only_mode)
+ {
+ /* The page cleaner is disabled in read-only mode. No pages
+ can be dirtied, so all of them must be clean. */
+ ut_ad(block->page.oldest_modification == 0 ||
+ block->page.oldest_modification == recv_sys.recovered_lsn ||
+ srv_force_recovery == SRV_FORCE_NO_LOG_REDO);
+ ut_ad(block->page.buf_fix_count == 0);
+ ut_ad(block->page.io_fix == BUF_IO_NONE);
+ break;
+ }
+
+ buf_page_mutex_enter(block);
+ auto ready= buf_flush_ready_for_replace(&block->page);
+ buf_page_mutex_exit(block);
+
+ if (!ready)
+ return block;
+
+ break;
+ }
+ }
- return(NULL);
+ return nullptr;
}
#endif /* UNIV_DEBUG */
-/*********************************************************************//**
-Checks that all file pages in the buffer chunk are in a replaceable state.
-@return address of a non-free block, or NULL if all freed */
-static
-const buf_block_t*
-buf_chunk_not_freed(
-/*================*/
- buf_chunk_t* chunk) /*!< in: chunk being checked */
-{
- buf_block_t* block;
- ulint i;
-
- block = chunk->blocks;
-
- for (i = chunk->size; i--; block++) {
- ibool ready;
-
- switch (buf_block_get_state(block)) {
- case BUF_BLOCK_POOL_WATCH:
- case BUF_BLOCK_ZIP_PAGE:
- case BUF_BLOCK_ZIP_DIRTY:
- /* The uncompressed buffer pool should never
- contain compressed block descriptors. */
- ut_error;
- break;
- case BUF_BLOCK_NOT_USED:
- case BUF_BLOCK_READY_FOR_USE:
- case BUF_BLOCK_MEMORY:
- case BUF_BLOCK_REMOVE_HASH:
- /* Skip blocks that are not being used for
- file pages. */
- break;
- case BUF_BLOCK_FILE_PAGE:
- if (srv_read_only_mode) {
- /* The page cleaner is disabled in
- read-only mode. No pages can be
- dirtied, so all of them must be clean. */
- ut_ad(block->page.oldest_modification == 0
- || block->page.oldest_modification
- == recv_sys.recovered_lsn
- || srv_force_recovery
- == SRV_FORCE_NO_LOG_REDO);
- ut_ad(block->page.buf_fix_count == 0);
- ut_ad(block->page.io_fix == BUF_IO_NONE);
- break;
- }
-
- buf_page_mutex_enter(block);
- ready = buf_flush_ready_for_replace(&block->page);
- buf_page_mutex_exit(block);
-
- if (!ready) {
- return(block);
- }
-
- break;
- }
- }
-
- return(NULL);
-}
-
/** Free the synchronization objects of a buffer pool block descriptor
@param[in,out] block buffer pool block descriptor */
static void buf_block_free_mutexes(buf_block_t* block)
@@ -1623,245 +1500,204 @@ static void buf_block_free_mutexes(buf_block_t* block)
/** Create the buffer pool.
@return whether the creation failed */
-bool buf_pool_init()
+bool buf_pool_t::create()
{
- ulint i;
- ulint chunk_size;
- buf_chunk_t* chunk;
+ ut_ad(this == &buf_pool);
+ ut_ad(srv_buf_pool_size % srv_buf_pool_chunk_unit == 0);
+ ut_ad(!is_initialised());
+ ut_ad(srv_buf_pool_size > 0);
- ut_ad(srv_buf_pool_size % srv_buf_pool_chunk_unit == 0);
- ut_ad(!buf_pool);
+ NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE;
- NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE;
-
- buf_pool_resizing = false;
- buf_pool_withdrawing = false;
- buf_withdraw_clock = 0;
- buf_chunk_map_reg = UT_NEW_NOKEY(buf_pool_chunk_map_t());
- buf_pool = (buf_pool_t*) ut_zalloc_nokey(sizeof *buf_pool);
+ ut_ad(!resizing);
+ ut_ad(!withdrawing);
+ ut_ad(!withdraw_clock());
+ ut_ad(!chunks_old);
- /* 1. Initialize general fields
- ------------------------------- */
- mutex_create(LATCH_ID_BUF_POOL, &buf_pool->mutex);
+ chunk_t::map_reg= UT_NEW_NOKEY(chunk_t::map());
- mutex_create(LATCH_ID_BUF_POOL_ZIP, &buf_pool->zip_mutex);
+ new(&allocator) ut_allocator<unsigned char>(mem_key_buf_buf_pool);
- new(&buf_pool->allocator)
- ut_allocator<unsigned char>(mem_key_buf_buf_pool);
+ n_chunks= srv_buf_pool_size / srv_buf_pool_chunk_unit;
+ const size_t chunk_size= srv_buf_pool_chunk_unit;
- mutex_enter(&buf_pool->mutex);
+ chunks= static_cast<buf_pool_t::chunk_t*>(ut_zalloc_nokey(n_chunks *
+ sizeof *chunks));
+ UT_LIST_INIT(free, &buf_page_t::list);
+ curr_size= 0;
+ auto chunk= chunks;
- if (srv_buf_pool_size > 0) {
- buf_pool->n_chunks
- = srv_buf_pool_size / srv_buf_pool_chunk_unit;
- chunk_size = srv_buf_pool_chunk_unit;
+ do
+ {
+ if (!chunk->create(chunk_size))
+ {
+ while (--chunk >= chunks)
+ {
+ buf_block_t* block= chunk->blocks;
+
+ for (auto i= chunk->size; i--; block++)
+ buf_block_free_mutexes(block);
+
+ allocator.deallocate_large_dodump(chunk->mem, &chunk->mem_pfx,
+ chunk->mem_size());
+ }
+ ut_free(chunks);
+ chunks= nullptr;
+ UT_DELETE(chunk_t::map_reg);
+ chunk_t::map_reg= nullptr;
+ ut_ad(!is_initialised());
+ return true;
+ }
+
+ curr_size+= chunk->size;
+ }
+ while (++chunk < chunks + n_chunks);
- buf_pool->chunks =
- reinterpret_cast<buf_chunk_t*>(ut_zalloc_nokey(
- buf_pool->n_chunks * sizeof(*chunk)));
- buf_pool->chunks_old = NULL;
+ ut_ad(is_initialised());
+ mutex_create(LATCH_ID_BUF_POOL, &mutex);
+ mutex_create(LATCH_ID_BUF_POOL_ZIP, &zip_mutex);
- UT_LIST_INIT(buf_pool->LRU, &buf_page_t::LRU);
- UT_LIST_INIT(buf_pool->free, &buf_page_t::list);
- UT_LIST_INIT(buf_pool->withdraw, &buf_page_t::list);
- buf_pool->withdraw_target = 0;
- UT_LIST_INIT(buf_pool->flush_list, &buf_page_t::list);
- UT_LIST_INIT(buf_pool->unzip_LRU, &buf_block_t::unzip_LRU);
+ UT_LIST_INIT(LRU, &buf_page_t::LRU);
+ UT_LIST_INIT(withdraw, &buf_page_t::list);
+ withdraw_target= 0;
+ UT_LIST_INIT(flush_list, &buf_page_t::list);
+ UT_LIST_INIT(unzip_LRU, &buf_block_t::unzip_LRU);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- UT_LIST_INIT(buf_pool->zip_clean, &buf_page_t::list);
+ UT_LIST_INIT(zip_clean, &buf_page_t::list);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- for (i = 0; i < UT_ARR_SIZE(buf_pool->zip_free); ++i) {
- UT_LIST_INIT(
- buf_pool->zip_free[i], &buf_buddy_free_t::list);
- }
-
- buf_pool->curr_size = 0;
- chunk = buf_pool->chunks;
-
- do {
- if (!buf_chunk_init(chunk, chunk_size)) {
- while (--chunk >= buf_pool->chunks) {
- buf_block_t* block = chunk->blocks;
-
- for (i = chunk->size; i--; block++) {
- buf_block_free_mutexes(block);
- }
-
- buf_pool->allocator.deallocate_large_dodump(
- chunk->mem, &chunk->mem_pfx, chunk->mem_size());
- }
- ut_free(buf_pool->chunks);
- mutex_exit(&buf_pool->mutex);
+ for (size_t i= 0; i < UT_ARR_SIZE(zip_free); ++i)
+ UT_LIST_INIT(zip_free[i], &buf_buddy_free_t::list);
- return true;
- }
+ read_ahead_area= ut_min(BUF_READ_AHEAD_PAGES,
+ ut_2_power_up(curr_size / BUF_READ_AHEAD_PORTION));
+ curr_pool_size= srv_buf_pool_size;
- buf_pool->curr_size += chunk->size;
- } while (++chunk < buf_pool->chunks + buf_pool->n_chunks);
+ old_size= curr_size;
+ n_chunks_new= n_chunks;
- buf_pool->read_ahead_area =
- ut_min(BUF_READ_AHEAD_PAGES,
- ut_2_power_up(buf_pool->curr_size /
- BUF_READ_AHEAD_PORTION));
- buf_pool->curr_pool_size = srv_buf_pool_size;
+ /* Number of locks protecting page_hash must be a power of two */
+ srv_n_page_hash_locks= static_cast<ulong>
+ (ut_2_power_up(srv_n_page_hash_locks));
+ ut_a(srv_n_page_hash_locks != 0);
+ ut_a(srv_n_page_hash_locks <= MAX_PAGE_HASH_LOCKS);
- buf_pool->old_size = buf_pool->curr_size;
- buf_pool->n_chunks_new = buf_pool->n_chunks;
+ page_hash= ib_create(2 * curr_size,
+ LATCH_ID_HASH_TABLE_RW_LOCK,
+ srv_n_page_hash_locks, MEM_HEAP_FOR_PAGE_HASH);
- /* Number of locks protecting page_hash must be a
- power of two */
- srv_n_page_hash_locks = static_cast<ulong>(
- ut_2_power_up(srv_n_page_hash_locks));
- ut_a(srv_n_page_hash_locks != 0);
- ut_a(srv_n_page_hash_locks <= MAX_PAGE_HASH_LOCKS);
+ ut_ad(!page_hash_old);
+ zip_hash= hash_create(2 * curr_size);
+ last_printout_time= time(NULL);
- buf_pool->page_hash = ib_create(
- 2 * buf_pool->curr_size,
- LATCH_ID_HASH_TABLE_RW_LOCK,
- srv_n_page_hash_locks, MEM_HEAP_FOR_PAGE_HASH);
+ mutex_create(LATCH_ID_FLUSH_LIST, &flush_list_mutex);
- buf_pool->page_hash_old = NULL;
+ for (int i= BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++)
+ no_flush[i]= os_event_create(0);
- buf_pool->zip_hash = hash_create(2 * buf_pool->curr_size);
+ watch= static_cast<buf_page_t*>
+ (ut_zalloc_nokey(sizeof *watch * BUF_POOL_WATCH_SIZE));
- buf_pool->last_printout_time = time(NULL);
- }
- /* 2. Initialize flushing fields
- -------------------------------- */
+ try_LRU_scan= true;
- mutex_create(LATCH_ID_FLUSH_LIST, &buf_pool->flush_list_mutex);
+ ut_d(flush_hp.m_mutex= &flush_list_mutex;);
+ ut_d(lru_hp.m_mutex= &mutex);
+ ut_d(lru_scan_itr.m_mutex= &mutex);
+ ut_d(single_scan_itr.m_mutex= &mutex);
- for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
- buf_pool->no_flush[i] = os_event_create(0);
- }
+ io_buf.create((srv_n_read_io_threads + srv_n_write_io_threads) *
+ OS_AIO_N_PENDING_IOS_PER_THREAD);
- buf_pool->watch = (buf_page_t*) ut_zalloc_nokey(
- sizeof(*buf_pool->watch) * BUF_POOL_WATCH_SIZE);
+ /* FIXME: remove some of these variables */
+ srv_buf_pool_curr_size= curr_pool_size;
+ srv_buf_pool_old_size= srv_buf_pool_size;
+ srv_buf_pool_base_size= srv_buf_pool_size;
- /* All fields are initialized by ut_zalloc_nokey(). */
-
- buf_pool->try_LRU_scan = TRUE;
-
- /* Initialize the hazard pointer for flush_list batches */
- new(&buf_pool->flush_hp) FlushHp(&buf_pool->flush_list_mutex);
-
- /* Initialize the hazard pointer for LRU batches */
- new(&buf_pool->lru_hp) LRUHp(&buf_pool->mutex);
-
- /* Initialize the iterator for LRU scan search */
- new(&buf_pool->lru_scan_itr) LRUItr(&buf_pool->mutex);
-
- /* Initialize the iterator for single page scan search */
- new(&buf_pool->single_scan_itr) LRUItr(&buf_pool->mutex);
-
- /* Initialize the temporal memory array and slots */
- new(&buf_pool->io_buf) buf_pool_t::io_buf_t(
- (srv_n_read_io_threads + srv_n_write_io_threads)
- * OS_AIO_N_PENDING_IOS_PER_THREAD);
-
- /* FIXME: remove some of these variables */
- srv_buf_pool_curr_size = buf_pool->curr_pool_size;
- srv_buf_pool_old_size = srv_buf_pool_size;
- srv_buf_pool_base_size = srv_buf_pool_size;
-
- mutex_exit(&buf_pool->mutex);
- DBUG_EXECUTE_IF("buf_pool_init_instance_force_oom", return true;);
-
- buf_chunk_map_ref = buf_chunk_map_reg;
- buf_LRU_old_ratio_update(100 * 3 / 8, false);
- btr_search_sys_create(srv_buf_pool_curr_size / sizeof(void*) / 64);
- return false;
+ chunk_t::map_ref= chunk_t::map_reg;
+ buf_LRU_old_ratio_update(100 * 3 / 8, false);
+ btr_search_sys_create(srv_buf_pool_curr_size / sizeof(void*) / 64);
+ ut_ad(is_initialised());
+ return false;
}
-/** Free the buffer pool at shutdown.
-This must not be invoked before freeing all mutexes. */
-void buf_pool_free()
+/** Clean up after successful create() */
+void buf_pool_t::close()
{
- buf_chunk_t* chunk;
- buf_chunk_t* chunks;
- buf_page_t* bpage;
- buf_page_t* prev_bpage = 0;
-
- mutex_free(&buf_pool->mutex);
- mutex_free(&buf_pool->zip_mutex);
- mutex_free(&buf_pool->flush_list_mutex);
-
- if (buf_pool->flush_rbt) {
- rbt_free(buf_pool->flush_rbt);
- buf_pool->flush_rbt = NULL;
- }
-
- for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
- bpage != NULL;
- bpage = prev_bpage) {
-
- prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
- buf_page_state state = buf_page_get_state(bpage);
-
- ut_ad(buf_page_in_file(bpage));
- ut_ad(bpage->in_LRU_list);
+ ut_ad(this == &buf_pool);
+ if (!is_initialised())
+ return;
- if (state != BUF_BLOCK_FILE_PAGE) {
- /* We must not have any dirty block except
- when doing a fast shutdown. */
- ut_ad(state == BUF_BLOCK_ZIP_PAGE
- || srv_fast_shutdown == 2);
- buf_page_free_descriptor(bpage);
- }
- }
+ mutex_free(&mutex);
+ mutex_free(&zip_mutex);
+ mutex_free(&flush_list_mutex);
- ut_free(buf_pool->watch);
- buf_pool->watch = NULL;
+ if (flush_rbt)
+ {
+ rbt_free(flush_rbt);
+ flush_rbt= nullptr;
+ }
- chunks = buf_pool->chunks;
- chunk = chunks + buf_pool->n_chunks;
+ for (buf_page_t *bpage= UT_LIST_GET_LAST(LRU), *prev_bpage= nullptr; bpage;
+ bpage= prev_bpage)
+ {
+ prev_bpage= UT_LIST_GET_PREV(LRU, bpage);
+ buf_page_state state= buf_page_get_state(bpage);
+
+ ut_ad(buf_page_in_file(bpage));
+ ut_ad(bpage->in_LRU_list);
+
+ if (state != BUF_BLOCK_FILE_PAGE)
+ {
+ /* We must not have any dirty block except during a fast shutdown. */
+ ut_ad(state == BUF_BLOCK_ZIP_PAGE || srv_fast_shutdown == 2);
+ buf_page_free_descriptor(bpage);
+ }
+ }
- while (--chunk >= chunks) {
- buf_block_t* block = chunk->blocks;
+ ut_free(watch);
+ watch= nullptr;
- for (ulint i = chunk->size; i--; block++) {
- buf_block_free_mutexes(block);
- }
+ for (auto chunk= chunks + n_chunks; --chunk >= chunks; )
+ {
+ buf_block_t *block= chunk->blocks;
- buf_pool->allocator.deallocate_large_dodump(
- chunk->mem, &chunk->mem_pfx, chunk->mem_size());
- }
+ for (auto i= chunk->size; i--; block++)
+ buf_block_free_mutexes(block);
- for (ulint i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; ++i) {
- os_event_destroy(buf_pool->no_flush[i]);
- }
+ allocator.deallocate_large_dodump(chunk->mem, &chunk->mem_pfx,
+ chunk->mem_size());
+ }
- ut_free(buf_pool->chunks);
- ha_clear(buf_pool->page_hash);
- hash_table_free(buf_pool->page_hash);
- hash_table_free(buf_pool->zip_hash);
+ for (ulint i= BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; ++i)
+ os_event_destroy(no_flush[i]);
- buf_pool->io_buf.~io_buf_t();
- buf_pool->allocator.~ut_allocator();
- UT_DELETE(buf_chunk_map_reg);
- buf_chunk_map_reg = buf_chunk_map_ref = NULL;
+ ut_free(chunks);
+ chunks= nullptr;
+ ha_clear(page_hash);
+ hash_table_free(page_hash);
+ hash_table_free(zip_hash);
- ut_free(buf_pool);
- buf_pool = NULL;
+ io_buf.close();
+ UT_DELETE(chunk_t::map_reg);
+ chunk_t::map_reg= chunk_t::map_ref= nullptr;
}
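
create() and close() above act as a delayed constructor and an early destructor on a statically allocated object, gated by is_initialised(). A minimal standalone analogue of that pattern, using a hypothetical pool_t rather than InnoDB's real types:

#include <cassert>
#include <cstdlib>

/* Hypothetical analogue of a statically allocated pool with a
delayed 'constructor' (create) and an early 'destructor' (close). */
struct pool_t
{
  void*       mem= nullptr;   /* nullptr means "not initialised" */
  std::size_t size= 0;

  bool is_initialised() const { return mem != nullptr; }

  /* Delayed constructor: may fail, so it reports a status instead of throwing. */
  bool create(std::size_t bytes)
  {
    assert(!is_initialised());
    mem= std::malloc(bytes);
    if (!mem)
      return true;            /* failure */
    size= bytes;
    return false;             /* success */
  }

  /* Early destructor: safe to call even if create() never succeeded. */
  void close()
  {
    if (!is_initialised())
      return;
    std::free(mem);
    mem= nullptr;
    size= 0;
  }
};

static pool_t pool;           /* statically allocated singleton */

int main()
{
  if (pool.create(1 << 20))
    return 1;
  /* ... use pool ... */
  pool.close();
}

This shape lets shutdown call close() unconditionally: a pool whose create() never ran simply returns early.
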
-/** Reallocate a control block.
-@param[in] block pointer to control block
-@retval false if failed because of no free blocks. */
-static bool buf_page_realloc(buf_block_t* block)
+/** Try to reallocate a control block.
+@param block control block to reallocate
+@return whether the reallocation succeeded */
+inline bool buf_pool_t::realloc(buf_block_t *block)
{
buf_block_t* new_block;
- ut_ad(buf_pool_withdrawing);
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(withdrawing);
+ ut_ad(mutex_own(&mutex));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
new_block = buf_LRU_get_free_only();
if (new_block == NULL) {
- return(false); /* buf_pool->free list was not enough */
+ return(false); /* free list was not enough */
}
rw_lock_t* hash_lock = buf_page_hash_lock_get(block->page.id);
@@ -1883,16 +1719,16 @@ static bool buf_page_realloc(buf_block_t* block)
buf_LRU_adjust_hp(&block->page);
buf_page_t* prev_b = UT_LIST_GET_PREV(LRU, &block->page);
- UT_LIST_REMOVE(buf_pool->LRU, &block->page);
+ UT_LIST_REMOVE(LRU, &block->page);
if (prev_b != NULL) {
- UT_LIST_INSERT_AFTER(buf_pool->LRU, prev_b, &new_block->page);
+ UT_LIST_INSERT_AFTER(LRU, prev_b, &new_block->page);
} else {
- UT_LIST_ADD_FIRST(buf_pool->LRU, &new_block->page);
+ UT_LIST_ADD_FIRST(LRU, &new_block->page);
}
- if (buf_pool->LRU_old == &block->page) {
- buf_pool->LRU_old = &new_block->page;
+ if (LRU_old == &block->page) {
+ LRU_old = &new_block->page;
}
ut_ad(new_block->page.in_LRU_list);
@@ -1905,29 +1741,29 @@ static bool buf_page_realloc(buf_block_t* block)
page_zip_get_size(&new_block->page.zip));
buf_block_t* prev_block = UT_LIST_GET_PREV(unzip_LRU, block);
- UT_LIST_REMOVE(buf_pool->unzip_LRU, block);
+ UT_LIST_REMOVE(unzip_LRU, block);
ut_d(block->in_unzip_LRU_list = FALSE);
block->page.zip.data = NULL;
page_zip_set_size(&block->page.zip, 0);
if (prev_block != NULL) {
- UT_LIST_INSERT_AFTER(buf_pool->unzip_LRU, prev_block, new_block);
+ UT_LIST_INSERT_AFTER(unzip_LRU, prev_block, new_block);
} else {
- UT_LIST_ADD_FIRST(buf_pool->unzip_LRU, new_block);
+ UT_LIST_ADD_FIRST(unzip_LRU, new_block);
}
} else {
ut_ad(!block->in_unzip_LRU_list);
ut_d(new_block->in_unzip_LRU_list = FALSE);
}
- /* relocate buf_pool->page_hash */
+ /* relocate page_hash */
ut_ad(block->page.in_page_hash);
ut_ad(&block->page == buf_page_hash_get_low(block->page.id));
ut_d(block->page.in_page_hash = FALSE);
ulint fold = block->page.id.fold();
ut_ad(fold == new_block->page.id.fold());
- HASH_REPLACE(buf_page_t, hash, buf_pool->page_hash, fold,
+ HASH_REPLACE(buf_page_t, hash, page_hash, fold,
&block->page, &new_block->page);
ut_ad(new_block->page.in_page_hash);
@@ -1944,7 +1780,7 @@ static bool buf_page_realloc(buf_block_t* block)
block->page.id
= page_id_t(ULINT32_UNDEFINED, ULINT32_UNDEFINED);
- /* Relocate buf_pool->flush_list. */
+ /* Relocate flush_list. */
if (block->page.oldest_modification) {
buf_flush_relocate_on_flush_list(
&block->page, &new_block->page);
@@ -1953,7 +1789,7 @@ static bool buf_page_realloc(buf_block_t* block)
/* set other flags of buf_block_t */
#ifdef BTR_CUR_HASH_ADAPT
- /* This code should only be executed by buf_pool_resize(),
+ /* This code should only be executed by resize(),
while the adaptive hash index is disabled. */
assert_block_ahi_empty(block);
assert_block_ahi_empty_on_init(new_block);
@@ -2015,84 +1851,31 @@ buf_resize_status(
ib::info() << export_vars.innodb_buffer_pool_resize_status;
}
-/** Determines if a block is intended to be withdrawn.
-@param[in] block pointer to control block
-@retval true if will be withdrawn */
-bool buf_block_will_be_withdrawn(const buf_block_t* block)
-{
- ut_ad(buf_pool->curr_size < buf_pool->old_size);
- ut_ad(!buf_pool_resizing || mutex_own(&buf_pool->mutex));
-
- const buf_chunk_t* chunk
- = buf_pool->chunks + buf_pool->n_chunks_new;
- const buf_chunk_t* echunk
- = buf_pool->chunks + buf_pool->n_chunks;
-
- while (chunk < echunk) {
- if (block >= chunk->blocks
- && block < chunk->blocks + chunk->size) {
- return(true);
- }
- ++chunk;
- }
-
- return(false);
-}
-
-/** Determines if a frame is intended to be withdrawn.
-@param[in] ptr pointer to a frame
-@retval true if will be withdrawn */
-bool
-buf_frame_will_be_withdrawn(const byte* ptr)
-{
- ut_ad(buf_pool->curr_size < buf_pool->old_size);
- ut_ad(!buf_pool_resizing || mutex_own(&buf_pool->mutex));
-
- const buf_chunk_t* chunk
- = buf_pool->chunks + buf_pool->n_chunks_new;
- const buf_chunk_t* echunk
- = buf_pool->chunks + buf_pool->n_chunks;
-
- while (chunk < echunk) {
- if (ptr >= chunk->blocks->frame
- && ptr < (chunk->blocks + chunk->size - 1)->frame
- + srv_page_size) {
- return(true);
- }
- ++chunk;
- }
-
- return(false);
-}
-
-/** Withdraw the buffer pool blocks from the end of the buffer pool
-until withdrawn by buf_pool->withdraw_target.
-@retval true if retry is needed */
-static bool buf_pool_withdraw_blocks()
+/** Withdraw blocks from the buffer pool until meeting withdraw_target.
+@return whether retry is needed */
+inline bool buf_pool_t::withdraw_blocks()
{
buf_block_t* block;
ulint loop_count = 0;
ib::info() << "start to withdraw the last "
- << buf_pool->withdraw_target << " blocks";
+ << withdraw_target << " blocks";
- /* Minimize buf_pool->zip_free[i] lists */
- mutex_enter(&buf_pool->mutex);
+ /* Minimize zip_free[i] lists */
+ mutex_enter(&mutex);
buf_buddy_condense_free();
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&mutex);
- while (UT_LIST_GET_LEN(buf_pool->withdraw)
- < buf_pool->withdraw_target) {
+ while (UT_LIST_GET_LEN(withdraw) < withdraw_target) {
/* try to withdraw from free_list */
ulint count1 = 0;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&mutex);
block = reinterpret_cast<buf_block_t*>(
- UT_LIST_GET_FIRST(buf_pool->free));
+ UT_LIST_GET_FIRST(free));
while (block != NULL
- && UT_LIST_GET_LEN(buf_pool->withdraw)
- < buf_pool->withdraw_target) {
+ && UT_LIST_GET_LEN(withdraw) < withdraw_target) {
ut_ad(block->page.in_free_list);
ut_ad(!block->page.in_flush_list);
ut_ad(!block->page.in_LRU_list);
@@ -2103,36 +1886,31 @@ static bool buf_pool_withdraw_blocks()
UT_LIST_GET_NEXT(
list, &block->page));
- if (buf_block_will_be_withdrawn(block)) {
+ if (buf_pool.will_be_withdrawn(block->page)) {
/* This should be withdrawn */
- UT_LIST_REMOVE(
- buf_pool->free,
- &block->page);
- UT_LIST_ADD_LAST(
- buf_pool->withdraw,
- &block->page);
+ UT_LIST_REMOVE(free, &block->page);
+ UT_LIST_ADD_LAST(withdraw, &block->page);
ut_d(block->in_withdraw_list = TRUE);
count1++;
}
block = next_block;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&mutex);
/* reserve free_list length */
- if (UT_LIST_GET_LEN(buf_pool->withdraw)
- < buf_pool->withdraw_target) {
+ if (UT_LIST_GET_LEN(withdraw) < withdraw_target) {
ulint scan_depth;
flush_counters_t n;
/* cap scan_depth with current LRU size. */
- mutex_enter(&buf_pool->mutex);
- scan_depth = UT_LIST_GET_LEN(buf_pool->LRU);
- mutex_exit(&buf_pool->mutex);
+ mutex_enter(&mutex);
+ scan_depth = UT_LIST_GET_LEN(LRU);
+ mutex_exit(&mutex);
scan_depth = ut_min(
- ut_max(buf_pool->withdraw_target
- - UT_LIST_GET_LEN(buf_pool->withdraw),
+ ut_max(withdraw_target
+ - UT_LIST_GET_LEN(withdraw),
static_cast<ulint>(srv_LRU_scan_depth)),
scan_depth);
@@ -2151,9 +1929,9 @@ static bool buf_pool_withdraw_blocks()
/* relocate blocks/buddies in withdrawn area */
ulint count2 = 0;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&mutex);
buf_page_t* bpage;
- bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+ bpage = UT_LIST_GET_FIRST(LRU);
while (bpage != NULL) {
BPageMutex* block_mutex;
buf_page_t* next_bpage;
@@ -2164,7 +1942,7 @@ static bool buf_pool_withdraw_blocks()
next_bpage = UT_LIST_GET_NEXT(LRU, bpage);
if (bpage->zip.data != NULL
- && buf_frame_will_be_withdrawn(bpage->zip.data)
+ && will_be_withdrawn(bpage->zip.data)
&& buf_page_can_relocate(bpage)) {
mutex_exit(block_mutex);
buf_pool_mutex_exit_forbid();
@@ -2182,13 +1960,11 @@ static bool buf_pool_withdraw_blocks()
if (buf_page_get_state(bpage)
== BUF_BLOCK_FILE_PAGE
- && buf_block_will_be_withdrawn(
- reinterpret_cast<buf_block_t*>(bpage))) {
-
+ && buf_pool.will_be_withdrawn(*bpage)) {
if (buf_page_can_relocate(bpage)) {
mutex_exit(block_mutex);
buf_pool_mutex_exit_forbid();
- if(!buf_page_realloc(
+ if (!realloc(
reinterpret_cast<buf_block_t*>(
bpage))) {
/* failed to allocate block */
@@ -2208,18 +1984,18 @@ static bool buf_pool_withdraw_blocks()
bpage = next_bpage;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&mutex);
buf_resize_status(
"withdrawing blocks. (" ULINTPF "/" ULINTPF ")",
- UT_LIST_GET_LEN(buf_pool->withdraw),
- buf_pool->withdraw_target);
+ UT_LIST_GET_LEN(withdraw),
+ withdraw_target);
ib::info() << "withdrew "
<< count1 << " blocks from free list."
<< " Tried to relocate " << count2 << " pages ("
- << UT_LIST_GET_LEN(buf_pool->withdraw) << "/"
- << buf_pool->withdraw_target << ")";
+ << UT_LIST_GET_LEN(withdraw) << "/"
+ << withdraw_target << ")";
if (++loop_count >= 10) {
/* give up for now.
@@ -2233,29 +2009,20 @@ static bool buf_pool_withdraw_blocks()
}
/* confirm withdrawn enough */
- const buf_chunk_t* chunk
- = buf_pool->chunks + buf_pool->n_chunks_new;
- const buf_chunk_t* echunk
- = buf_pool->chunks + buf_pool->n_chunks;
-
- while (chunk < echunk) {
+ for (const chunk_t* chunk = chunks + n_chunks_new,
+ * const echunk = chunks + n_chunks; chunk != echunk; chunk++) {
block = chunk->blocks;
for (ulint j = chunk->size; j--; block++) {
- /* If !=BUF_BLOCK_NOT_USED block in the
- withdrawn area, it means corruption
- something */
- ut_a(buf_block_get_state(block)
- == BUF_BLOCK_NOT_USED);
+ ut_a(buf_block_get_state(block) == BUF_BLOCK_NOT_USED);
ut_ad(block->in_withdraw_list);
}
- ++chunk;
}
- ib::info() << "withdrawn target: "
- << UT_LIST_GET_LEN(buf_pool->withdraw) << " blocks";
+ ib::info() << "withdrawn target: " << UT_LIST_GET_LEN(withdraw)
+ << " blocks";
/* retry is not needed */
- ++buf_withdraw_clock;
+ ++withdraw_clock_;
return(false);
}
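
The loop above repeatedly scans the free list and moves any block that lies in the soon-to-be-freed chunks onto the withdraw list, retrying until the target is reached. A simplified standalone model of that selection step (plain vectors instead of UT_LIST, hypothetical names, no locking or LRU relocation):

#include <cstddef>
#include <cstdint>
#include <vector>

/* Hypothetical model: a block is withdrawn when its address lies in the
range that will be freed by the shrink. */
struct block { std::uintptr_t addr; };

static bool will_be_withdrawn(const block &b,
                              std::uintptr_t lo, std::uintptr_t hi)
{
  return b.addr >= lo && b.addr < hi;
}

/* Move blocks from free_list to withdrawn until withdraw_target is met.
Returns true if another pass (for example after flushing the LRU) is needed. */
static bool withdraw_blocks(std::vector<block> &free_list,
                            std::vector<block> &withdrawn,
                            std::size_t withdraw_target,
                            std::uintptr_t lo, std::uintptr_t hi)
{
  for (auto it= free_list.begin();
       it != free_list.end() && withdrawn.size() < withdraw_target; )
  {
    if (will_be_withdrawn(*it, lo, hi))
    {
      withdrawn.push_back(*it);
      it= free_list.erase(it);
    }
    else
      ++it;
  }
  return withdrawn.size() < withdraw_target;  /* retry needed? */
}

int main()
{
  std::vector<block> free_list{{0x1000}, {0x5000}, {0x9000}};
  std::vector<block> withdrawn;
  /* Withdraw everything above 0x4000 (hypothetical shrink boundary). */
  while (withdraw_blocks(free_list, withdrawn, 2, 0x4000, 0x10000)) {}
}
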
@@ -2265,18 +2032,18 @@ static void buf_pool_resize_hash()
{
hash_table_t* new_hash_table;
- ut_ad(buf_pool->page_hash_old == NULL);
+ ut_ad(buf_pool.page_hash_old == NULL);
/* recreate page_hash */
new_hash_table = ib_recreate(
- buf_pool->page_hash, 2 * buf_pool->curr_size);
+ buf_pool.page_hash, 2 * buf_pool.curr_size);
- for (ulint i = 0; i < hash_get_n_cells(buf_pool->page_hash); i++) {
+ for (ulint i = 0; i < hash_get_n_cells(buf_pool.page_hash); i++) {
buf_page_t* bpage;
bpage = static_cast<buf_page_t*>(
HASH_GET_FIRST(
- buf_pool->page_hash, i));
+ buf_pool.page_hash, i));
while (bpage) {
buf_page_t* prev_bpage = bpage;
@@ -2289,7 +2056,7 @@ static void buf_pool_resize_hash()
fold = prev_bpage->id.fold();
HASH_DELETE(buf_page_t, hash,
- buf_pool->page_hash, fold,
+ buf_pool.page_hash, fold,
prev_bpage);
HASH_INSERT(buf_page_t, hash,
@@ -2298,17 +2065,17 @@ static void buf_pool_resize_hash()
}
}
- buf_pool->page_hash_old = buf_pool->page_hash;
- buf_pool->page_hash = new_hash_table;
+ buf_pool.page_hash_old = buf_pool.page_hash;
+ buf_pool.page_hash = new_hash_table;
/* recreate zip_hash */
- new_hash_table = hash_create(2 * buf_pool->curr_size);
+ new_hash_table = hash_create(2 * buf_pool.curr_size);
- for (ulint i = 0; i < hash_get_n_cells(buf_pool->zip_hash); i++) {
+ for (ulint i = 0; i < hash_get_n_cells(buf_pool.zip_hash); i++) {
buf_page_t* bpage;
bpage = static_cast<buf_page_t*>(
- HASH_GET_FIRST(buf_pool->zip_hash, i));
+ HASH_GET_FIRST(buf_pool.zip_hash, i));
while (bpage) {
buf_page_t* prev_bpage = bpage;
@@ -2323,7 +2090,7 @@ static void buf_pool_resize_hash()
prev_bpage));
HASH_DELETE(buf_page_t, hash,
- buf_pool->zip_hash, fold,
+ buf_pool.zip_hash, fold,
prev_bpage);
HASH_INSERT(buf_page_t, hash,
@@ -2332,37 +2099,20 @@ static void buf_pool_resize_hash()
}
}
- hash_table_free(buf_pool->zip_hash);
- buf_pool->zip_hash = new_hash_table;
+ hash_table_free(buf_pool.zip_hash);
+ buf_pool.zip_hash = new_hash_table;
}
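
buf_pool_resize_hash() above rebuilds page_hash and zip_hash with twice the cells, reinserting every entry by its fold value. The same rehash step with an ordinary separate-chaining table (hypothetical page type and fold function, not InnoDB's hash_table_t):

#include <cstddef>
#include <list>
#include <vector>

struct page { std::size_t id; };

/* Hypothetical stand-in for page_id_t::fold(). */
static std::size_t fold(const page &p) { return p.id * 2654435761u; }

using hash_table= std::vector<std::list<page>>;

/* Rebuild old_table into a table with twice as many cells. */
static hash_table resize_hash(const hash_table &old_table)
{
  hash_table new_table(old_table.size() * 2);
  for (const auto &cell : old_table)
    for (const auto &p : cell)
      new_table[fold(p) % new_table.size()].push_back(p);
  return new_table;
}

int main()
{
  hash_table t(4);
  t[fold(page{7}) % t.size()].push_back(page{7});
  t= resize_hash(t);   /* entries keep their fold; only the cell count changes */
}
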
-#ifndef DBUG_OFF
-/** This is a debug routine to inject an memory allocation failure error. */
-static
-void
-buf_pool_resize_chunk_make_null(buf_chunk_t** new_chunks)
+/** Resize from srv_buf_pool_old_size to srv_buf_pool_size. */
+inline void buf_pool_t::resize()
{
- static int count = 0;
-
- if (count == 1) {
- ut_free(*new_chunks);
- *new_chunks = NULL;
- }
-
- count++;
-}
-#endif // DBUG_OFF
+ ut_ad(this == &buf_pool);
-/** Resize the buffer pool based on srv_buf_pool_size from
-srv_buf_pool_old_size. */
-static void buf_pool_resize()
-{
bool warning = false;
NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE;
- ut_ad(!buf_pool_resizing);
- ut_ad(!buf_pool_withdrawing);
+ ut_ad(!resize_in_progress());
ut_ad(srv_buf_pool_chunk_unit > 0);
ulint new_instance_size = srv_buf_pool_size >> srv_page_size_shift;
@@ -2372,16 +2122,16 @@ static void buf_pool_resize()
srv_buf_pool_old_size, srv_buf_pool_size,
srv_buf_pool_chunk_unit);
- mutex_enter(&buf_pool->mutex);
- ut_ad(buf_pool->curr_size == buf_pool->old_size);
- ut_ad(buf_pool->n_chunks_new == buf_pool->n_chunks);
- ut_ad(UT_LIST_GET_LEN(buf_pool->withdraw) == 0);
- ut_ad(buf_pool->flush_rbt == NULL);
+ mutex_enter(&mutex);
+ ut_ad(curr_size == old_size);
+ ut_ad(n_chunks_new == n_chunks);
+ ut_ad(UT_LIST_GET_LEN(withdraw) == 0);
+ ut_ad(flush_rbt == NULL);
- buf_pool->n_chunks_new = (new_instance_size << srv_page_size_shift)
+ n_chunks_new = (new_instance_size << srv_page_size_shift)
/ srv_buf_pool_chunk_unit;
- buf_pool->curr_size = buf_pool->n_chunks_new * buf_pool->chunks->size;
- mutex_exit(&buf_pool->mutex);
+ curr_size = n_chunks_new * chunks->size;
+ mutex_exit(&mutex);
#ifdef BTR_CUR_HASH_ADAPT
/* disable AHI if needed */
@@ -2404,23 +2154,18 @@ static void buf_pool_resize()
}
#endif /* BTR_CUR_HASH_ADAPT */
- if (buf_pool->curr_size < buf_pool->old_size) {
+ if (curr_size < old_size) {
/* set withdraw target */
- ulint withdraw_target = 0;
+ size_t w = 0;
- const buf_chunk_t* chunk
- = buf_pool->chunks + buf_pool->n_chunks_new;
- const buf_chunk_t* echunk
- = buf_pool->chunks + buf_pool->n_chunks;
-
- while (chunk < echunk) {
- withdraw_target += chunk->size;
- ++chunk;
- }
+ for (const chunk_t* chunk = chunks + n_chunks_new,
+ * const echunk = chunks + n_chunks;
+ chunk != echunk; chunk++)
+ w += chunk->size;
- ut_ad(buf_pool->withdraw_target == 0);
- buf_pool->withdraw_target = withdraw_target;
- buf_pool_withdrawing = true;
+ ut_ad(withdraw_target == 0);
+ withdraw_target = w;
+ withdrawing.store(true, std::memory_order_relaxed);
}
buf_resize_status("Withdrawing blocks to be shrunken.");
@@ -2431,13 +2176,12 @@ static void buf_pool_resize()
withdraw_retry:
/* wait for the number of blocks fit to the new size (if needed)*/
- bool should_retry_withdraw
- = buf_pool->curr_size < buf_pool->old_size
- && buf_pool_withdraw_blocks();
+ bool should_retry_withdraw = curr_size < old_size
+ && withdraw_blocks();
if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
/* abort to resize for shutdown. */
- buf_pool_withdrawing = false;
+ withdrawing.store(false, std::memory_order_relaxed);
return;
}
@@ -2499,7 +2243,7 @@ withdraw_retry:
goto withdraw_retry;
}
- buf_pool_withdrawing = false;
+ withdrawing.store(false, std::memory_order_relaxed);
buf_resize_status("Latching whole of buffer pool.");
@@ -2521,23 +2265,22 @@ withdraw_retry:
}
/* Indicate critical path */
- buf_pool_resizing = true;
+ resizing.store(true, std::memory_order_relaxed);
- mutex_enter(&buf_pool->mutex);
- hash_lock_x_all(buf_pool->page_hash);
- buf_chunk_map_reg = UT_NEW_NOKEY(buf_pool_chunk_map_t());
+ mutex_enter(&mutex);
+ hash_lock_x_all(page_hash);
+ chunk_t::map_reg = UT_NEW_NOKEY(chunk_t::map());
/* add/delete chunks */
buf_resize_status("buffer pool resizing with chunks "
ULINTPF " to " ULINTPF ".",
- buf_pool->n_chunks, buf_pool->n_chunks_new);
+ n_chunks, n_chunks_new);
- if (buf_pool->n_chunks_new < buf_pool->n_chunks) {
+ if (n_chunks_new < n_chunks) {
/* delete chunks */
- buf_chunk_t* chunk = buf_pool->chunks + buf_pool->n_chunks_new;
- const buf_chunk_t* const echunk = buf_pool->chunks
- + buf_pool->n_chunks;
+ chunk_t* chunk = chunks + n_chunks_new;
+ const chunk_t* const echunk = chunks + n_chunks;
ulint sum_freed = 0;
@@ -2548,7 +2291,7 @@ withdraw_retry:
buf_block_free_mutexes(block);
}
- buf_pool->allocator.deallocate_large_dodump(
+ allocator.deallocate_large_dodump(
chunk->mem, &chunk->mem_pfx,
chunk->mem_size());
sum_freed += chunk->size;
@@ -2556,113 +2299,115 @@ withdraw_retry:
}
/* discard withdraw list */
- UT_LIST_INIT(buf_pool->withdraw, &buf_page_t::list);
- buf_pool->withdraw_target = 0;
+ UT_LIST_INIT(withdraw, &buf_page_t::list);
+ withdraw_target = 0;
- ib::info() << buf_pool->n_chunks - buf_pool->n_chunks_new
+ ib::info() << n_chunks - n_chunks_new
<< " chunks (" << sum_freed
<< " blocks) were freed.";
- buf_pool->n_chunks = buf_pool->n_chunks_new;
+ n_chunks = n_chunks_new;
}
{
- /* reallocate buf_pool->chunks */
- const ulint new_chunks_size
- = buf_pool->n_chunks_new * sizeof(buf_chunk_t);
+ /* reallocate chunks */
+ const size_t new_chunks_size
+ = n_chunks_new * sizeof(chunk_t);
- buf_chunk_t* new_chunks = reinterpret_cast<buf_chunk_t*>(
+ chunk_t* new_chunks = static_cast<chunk_t*>(
ut_zalloc_nokey_nofatal(new_chunks_size));
DBUG_EXECUTE_IF("buf_pool_resize_chunk_null",
- buf_pool_resize_chunk_make_null(&new_chunks););
+ {
+ static int count = 0;
+ if (count++ == 1) {
+ ut_free(new_chunks);
+ new_chunks= nullptr;
+ }
+ });
if (!new_chunks) {
ib::error() << "failed to allocate"
" the chunk array.";
- buf_pool->n_chunks_new = buf_pool->n_chunks;
+ n_chunks_new = n_chunks;
warning = true;
- buf_pool->chunks_old = NULL;
+ chunks_old = NULL;
goto calc_buf_pool_size;
}
- ulint n_chunks_copy = ut_min(buf_pool->n_chunks_new,
- buf_pool->n_chunks);
+ ulint n_chunks_copy = ut_min(n_chunks_new,
+ n_chunks);
- memcpy(new_chunks, buf_pool->chunks,
+ memcpy(new_chunks, chunks,
n_chunks_copy * sizeof *new_chunks);
for (ulint j = 0; j < n_chunks_copy; j++) {
- buf_pool_register_chunk(&new_chunks[j]);
+ new_chunks[j].reg();
}
- buf_pool->chunks_old = buf_pool->chunks;
- buf_pool->chunks = new_chunks;
+ chunks_old = chunks;
+ chunks = new_chunks;
}
- if (buf_pool->n_chunks_new > buf_pool->n_chunks) {
+ if (n_chunks_new > n_chunks) {
/* add chunks */
- buf_chunk_t* chunk = buf_pool->chunks + buf_pool->n_chunks;
- const buf_chunk_t* const echunk = buf_pool->chunks
- + buf_pool->n_chunks_new;
-
ulint sum_added = 0;
- ulint n_chunks = buf_pool->n_chunks;
-
- while (chunk < echunk) {
- ulong unit = srv_buf_pool_chunk_unit;
+ ulint n = n_chunks;
+ const size_t unit = srv_buf_pool_chunk_unit;
- if (!buf_chunk_init(chunk, unit)) {
+ for (chunk_t* chunk = chunks + n_chunks,
+ * const echunk = chunks + n_chunks_new;
+ chunk != echunk; chunk++) {
+ if (!chunk->create(unit)) {
ib::error() << "failed to allocate"
" memory for buffer pool chunk";
warning = true;
- buf_pool->n_chunks_new = n_chunks;
+ n_chunks_new = n_chunks;
break;
}
sum_added += chunk->size;
- ++n_chunks;
- ++chunk;
+ ++n;
}
- ib::info() << buf_pool->n_chunks_new - buf_pool->n_chunks
+ ib::info() << n_chunks_new - n_chunks
<< " chunks (" << sum_added
<< " blocks) were added.";
- buf_pool->n_chunks = n_chunks;
+ n_chunks = n;
}
calc_buf_pool_size:
- /* recalc buf_pool->curr_size */
+ /* recalc curr_size */
ulint new_size = 0;
{
- buf_chunk_t* chunk = buf_pool->chunks;
- const buf_chunk_t* const echunk = chunk + buf_pool->n_chunks;
+ chunk_t* chunk = chunks;
+ const chunk_t* const echunk = chunk + n_chunks;
do {
new_size += chunk->size;
} while (++chunk != echunk);
}
- buf_pool->curr_size = new_size;
- buf_pool->n_chunks_new = buf_pool->n_chunks;
+ curr_size = new_size;
+ n_chunks_new = n_chunks;
- if (buf_pool->chunks_old) {
- ut_free(buf_pool->chunks_old);
- buf_pool->chunks_old = NULL;
+ if (chunks_old) {
+ ut_free(chunks_old);
+ chunks_old = NULL;
}
- buf_pool_chunk_map_t* chunk_map_old = buf_chunk_map_ref;
- buf_chunk_map_ref = buf_chunk_map_reg;
+ chunk_t::map* chunk_map_old = chunk_t::map_ref;
+ chunk_t::map_ref = chunk_t::map_reg;
/* set size */
- ut_ad(UT_LIST_GET_LEN(buf_pool->withdraw) == 0);
- buf_pool->read_ahead_area = ut_min(
+ ut_ad(UT_LIST_GET_LEN(withdraw) == 0);
+ read_ahead_area = ut_min(
BUF_READ_AHEAD_PAGES,
- ut_2_power_up(buf_pool->curr_size / BUF_READ_AHEAD_PORTION));
- buf_pool->curr_pool_size = buf_pool->n_chunks * srv_buf_pool_chunk_unit;
- srv_buf_pool_curr_size = buf_pool->curr_pool_size;/* FIXME: remove*/
- buf_pool->old_size = buf_pool->curr_size;
+ ut_2_power_up(curr_size / BUF_READ_AHEAD_PORTION));
+ curr_pool_size = n_chunks * srv_buf_pool_chunk_unit;
+ srv_buf_pool_curr_size = curr_pool_size;/* FIXME: remove*/
+ old_size = curr_size;
innodb_set_buf_pool_size(buf_pool_size_align(srv_buf_pool_curr_size));
const bool new_size_too_diff
@@ -2677,17 +2422,17 @@ calc_buf_pool_size:
ib::info() << "hash tables were resized";
}
- hash_unlock_x_all(buf_pool->page_hash);
- mutex_exit(&buf_pool->mutex);
+ hash_unlock_x_all(page_hash);
+ mutex_exit(&mutex);
- if (buf_pool->page_hash_old != NULL) {
- hash_table_free(buf_pool->page_hash_old);
- buf_pool->page_hash_old = NULL;
+ if (page_hash_old != NULL) {
+ hash_table_free(page_hash_old);
+ page_hash_old = NULL;
}
UT_DELETE(chunk_map_old);
- buf_pool_resizing = false;
+ resizing.store(false, std::memory_order_relaxed);
/* Normalize other components, if the new size is too different */
if (!warning && new_size_too_diff) {
@@ -2744,7 +2489,7 @@ calc_buf_pool_size:
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- buf_validate();
+ validate();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
return;
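
The withdrawing and resizing flags above are status hints, so they are stored and read with std::memory_order_relaxed while the structures they describe remain protected by buf_pool.mutex. A tiny standalone illustration of that flag pattern (the combination inside resize_in_progress() is an assumption here, not taken from this diff):

#include <atomic>
#include <cstdio>

/* Status flags published with relaxed ordering: they are advisory hints;
the data they describe is protected by a mutex elsewhere. */
static std::atomic<bool> withdrawing{false};
static std::atomic<bool> resizing{false};

static bool resize_in_progress()
{
  return withdrawing.load(std::memory_order_relaxed) ||
         resizing.load(std::memory_order_relaxed);
}

int main()
{
  withdrawing.store(true, std::memory_order_relaxed);
  std::printf("in progress: %d\n", resize_in_progress());
  withdrawing.store(false, std::memory_order_relaxed);
}
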
@@ -2754,13 +2499,13 @@ calc_buf_pool_size:
static void buf_resize_callback(void *)
{
ut_a(srv_shutdown_state == SRV_SHUTDOWN_NONE);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
const auto size= srv_buf_pool_size;
const bool work= srv_buf_pool_old_size != size;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (work)
- buf_pool_resize();
+ buf_pool.resize();
else
{
std::ostringstream sout;
@@ -2785,63 +2530,9 @@ void buf_resize_shutdown()
}
-#ifdef BTR_CUR_HASH_ADAPT
-/** Clear the adaptive hash index on all pages in the buffer pool. */
-void buf_pool_clear_hash_index()
-{
- ut_ad(btr_search_own_all(RW_LOCK_X));
- ut_ad(!buf_pool_resizing);
- ut_ad(!btr_search_enabled);
-
- buf_chunk_t* chunks = buf_pool->chunks;
- buf_chunk_t* chunk = chunks + buf_pool->n_chunks;
-
- while (--chunk >= chunks) {
- buf_block_t* block = chunk->blocks;
- ulint i = chunk->size;
-
- for (; i--; block++) {
- dict_index_t* index = block->index;
- assert_block_ahi_valid(block);
-
- /* We can set block->index = NULL
- and block->n_pointers = 0
- when btr_search_own_all(RW_LOCK_X);
- see the comments in buf0buf.h */
-
- if (!index) {
-# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
- ut_a(!block->n_pointers);
-# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- continue;
- }
-
- ut_d(buf_page_state state
- = buf_block_get_state(block));
- /* Another thread may have set the
- state to BUF_BLOCK_REMOVE_HASH in
- buf_LRU_block_remove_hashed().
-
- The state change in buf_page_realloc()
- is not observable here, because in
- that case we would have !block->index.
-
- In the end, the entire adaptive hash
- index will be removed. */
- ut_ad(state == BUF_BLOCK_FILE_PAGE
- || state == BUF_BLOCK_REMOVE_HASH);
-# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
- block->n_pointers = 0;
-# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- block->index = NULL;
- }
- }
-}
-#endif /* BTR_CUR_HASH_ADAPT */
-
/********************************************************************//**
Relocate a buffer control block. Relocates the block on the LRU list
-and in buf_pool->page_hash. Does not relocate bpage->list.
+and in buf_pool.page_hash. Does not relocate bpage->list.
The caller must take care of relocating bpage->list. */
static
void
@@ -2854,7 +2545,7 @@ buf_relocate(
{
buf_page_t* b;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_hash_lock_held_x(bpage));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
@@ -2888,26 +2579,26 @@ buf_relocate(
ut_d(bpage->in_LRU_list = FALSE);
ut_d(bpage->in_page_hash = FALSE);
- /* relocate buf_pool->LRU */
+ /* relocate buf_pool.LRU */
b = UT_LIST_GET_PREV(LRU, bpage);
- UT_LIST_REMOVE(buf_pool->LRU, bpage);
+ UT_LIST_REMOVE(buf_pool.LRU, bpage);
if (b != NULL) {
- UT_LIST_INSERT_AFTER(buf_pool->LRU, b, dpage);
+ UT_LIST_INSERT_AFTER(buf_pool.LRU, b, dpage);
} else {
- UT_LIST_ADD_FIRST(buf_pool->LRU, dpage);
+ UT_LIST_ADD_FIRST(buf_pool.LRU, dpage);
}
- if (UNIV_UNLIKELY(buf_pool->LRU_old == bpage)) {
- buf_pool->LRU_old = dpage;
+ if (UNIV_UNLIKELY(buf_pool.LRU_old == bpage)) {
+ buf_pool.LRU_old = dpage;
#ifdef UNIV_LRU_DEBUG
- /* buf_pool->LRU_old must be the first item in the LRU list
+ /* buf_pool.LRU_old must be the first item in the LRU list
whose "old" flag is set. */
- ut_a(buf_pool->LRU_old->old);
- ut_a(!UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)
- || !UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)->old);
- ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)
- || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
+ ut_a(buf_pool.LRU_old->old);
+ ut_a(!UT_LIST_GET_PREV(LRU, buf_pool.LRU_old)
+ || !UT_LIST_GET_PREV(LRU, buf_pool.LRU_old)->old);
+ ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool.LRU_old)
+ || UT_LIST_GET_NEXT(LRU, buf_pool.LRU_old)->old);
} else {
/* Check that the "old" flag is consistent in
the block and its neighbours. */
@@ -2917,60 +2608,13 @@ buf_relocate(
ut_d(CheckInLRUList::validate());
- /* relocate buf_pool->page_hash */
+ /* relocate buf_pool.page_hash */
ulint fold = bpage->id.fold();
ut_ad(fold == dpage->id.fold());
- HASH_REPLACE(buf_page_t, hash, buf_pool->page_hash, fold, bpage,
+ HASH_REPLACE(buf_page_t, hash, buf_pool.page_hash, fold, bpage,
dpage);
}
-/** Hazard Pointer implementation. */
-
-/** Set current value
-@param bpage buffer block to be set as hp */
-void
-HazardPointer::set(buf_page_t* bpage)
-{
- ut_ad(mutex_own(m_mutex));
- ut_ad(!bpage || buf_page_in_file(bpage));
-
- m_hp = bpage;
-}
-
-/** Adjust the value of hp. This happens when some other thread working
-on the same list attempts to remove the hp from the list.
-@param bpage buffer block to be compared */
-
-void
-FlushHp::adjust(const buf_page_t* bpage)
-{
- ut_ad(bpage != NULL);
-
- /** We only support reverse traversal for now. */
- if (is_hp(bpage)) {
- m_hp = UT_LIST_GET_PREV(list, m_hp);
- }
-
- ut_ad(!m_hp || m_hp->in_flush_list);
-}
-
-/** Adjust the value of hp. This happens when some other thread working
-on the same list attempts to remove the hp from the list.
-@param bpage buffer block to be compared */
-
-void
-LRUHp::adjust(const buf_page_t* bpage)
-{
- ut_ad(bpage);
-
- /** We only support reverse traversal for now. */
- if (is_hp(bpage)) {
- m_hp = UT_LIST_GET_PREV(LRU, m_hp);
- }
-
- ut_ad(!m_hp || m_hp->in_LRU_list);
-}
-
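
The HazardPointer, FlushHp and LRUHp definitions removed above implement a cursor that other threads must adjust before unlinking the node it points at, so a reverse list scan can resume safely after releasing and reacquiring the mutex. A minimal standalone analogue with std::list (single external mutex assumed, hypothetical interface):

#include <cassert>
#include <iterator>
#include <list>

/* Minimal hazard-pointer analogue for a reverse scan over a list that other
threads may shrink; all access is assumed to happen under one mutex. */
template <typename T>
class HazardPointer
{
public:
  void set(typename std::list<T>::iterator it, std::list<T> *owner)
  { m_list= owner; m_hp= it; }

  /* Called by whoever is about to erase 'victim': if the cursor is on it,
  step back to the previous element so the scan can continue. */
  void adjust(typename std::list<T>::iterator victim)
  {
    if (m_list && m_hp == victim)
      m_hp= m_hp == m_list->begin() ? m_list->end() : std::prev(m_hp);
  }

  typename std::list<T>::iterator get() const { return m_hp; }

private:
  std::list<T> *m_list= nullptr;
  typename std::list<T>::iterator m_hp{};
};

int main()
{
  std::list<int> lru{1, 2, 3};
  HazardPointer<int> hp;
  auto it= std::prev(lru.end());             /* scanning from the tail */
  hp.set(it, &lru);
  hp.adjust(it);                             /* another thread erases *it */
  lru.erase(it);
  assert(hp.get() == std::prev(lru.end()));  /* cursor moved to the new tail */
}
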
/** Determine if a block is a sentinel for a buffer pool watch.
@param[in] bpage block
@return whether bpage a sentinel for a buffer pool watch */
@@ -2980,8 +2624,8 @@ bool buf_pool_watch_is_sentinel(const buf_page_t* bpage)
ut_ad(buf_page_hash_lock_held_s_or_x(bpage));
ut_ad(buf_page_in_file(bpage));
- if (bpage < &buf_pool->watch[0]
- || bpage >= &buf_pool->watch[BUF_POOL_WATCH_SIZE]) {
+ if (bpage < &buf_pool.watch[0]
+ || bpage >= &buf_pool.watch[BUF_POOL_WATCH_SIZE]) {
ut_ad(buf_page_get_state(bpage) != BUF_BLOCK_ZIP_PAGE
|| bpage->zip.data != NULL);
@@ -3034,27 +2678,27 @@ page_found:
hash_locks. buf_pool mutex is needed because any changes to
the page_hash must be covered by it and hash_locks are needed
because we don't want to read any stale information in
- buf_pool->watch[]. However, it is not in the critical code path
+ buf_pool.watch[]. However, it is not in the critical code path
as this function will be called only by the purge thread. */
/* To obey latching order first release the hash_lock. */
rw_lock_x_unlock(*hash_lock);
- mutex_enter(&buf_pool->mutex);
- hash_lock_x_all(buf_pool->page_hash);
+ mutex_enter(&buf_pool.mutex);
+ hash_lock_x_all(buf_pool.page_hash);
/* We have to recheck that the page
was not loaded or a watch set by some other
purge thread. This is because of the small
time window between when we release the
- hash_lock to acquire buf_pool->mutex above. */
+ hash_lock to acquire buf_pool.mutex above. */
*hash_lock = buf_page_hash_lock_get(page_id);
bpage = buf_page_hash_get_low(page_id);
if (UNIV_LIKELY_NULL(bpage)) {
- mutex_exit(&buf_pool->mutex);
- hash_unlock_x_all_but(buf_pool->page_hash, *hash_lock);
+ mutex_exit(&buf_pool.mutex);
+ hash_unlock_x_all_but(buf_pool.page_hash, *hash_lock);
goto page_found;
}
@@ -3062,7 +2706,7 @@ page_found:
BUF_POOL_WATCH_SIZE. So there is no way for a purge task
to hold a watch when setting another watch. */
for (i = 0; i < BUF_POOL_WATCH_SIZE; i++) {
- bpage = &buf_pool->watch[i];
+ bpage = &buf_pool.watch[i];
ut_ad(bpage->access_time == 0);
ut_ad(bpage->oldest_modification == 0);
@@ -3074,24 +2718,24 @@ page_found:
ut_ad(!bpage->in_page_hash);
ut_ad(bpage->buf_fix_count == 0);
- /* bpage is pointing to buf_pool->watch[],
- which is protected by buf_pool->mutex.
+ /* bpage is pointing to buf_pool.watch[],
+ which is protected by buf_pool.mutex.
Normally, buf_page_t objects are protected by
- buf_block_t::mutex or buf_pool->zip_mutex or both. */
+ buf_block_t::mutex or buf_pool.zip_mutex or both. */
bpage->state = BUF_BLOCK_ZIP_PAGE;
bpage->id = page_id;
bpage->buf_fix_count = 1;
ut_d(bpage->in_page_hash = TRUE);
- HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
+ HASH_INSERT(buf_page_t, hash, buf_pool.page_hash,
page_id.fold(), bpage);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Once the sentinel is in the page_hash we can
safely release all locks except just the
relevant hash_lock */
- hash_unlock_x_all_but(buf_pool->page_hash,
+ hash_unlock_x_all_but(buf_pool.page_hash,
*hash_lock);
return(NULL);
@@ -3129,9 +2773,9 @@ buf_pool_watch_remove(buf_page_t* watch)
ut_ad(rw_lock_own(hash_lock, RW_LOCK_X));
#endif /* UNIV_DEBUG */
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
- HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, watch->id.fold(),
+ HASH_DELETE(buf_page_t, hash, buf_pool.page_hash, watch->id.fold(),
watch);
ut_d(watch->in_page_hash = FALSE);
watch->buf_fix_count = 0;
@@ -3144,12 +2788,12 @@ buf_pool_watch_set(same_page_id) must have returned NULL before.
void buf_pool_watch_unset(const page_id_t page_id)
{
buf_page_t* bpage;
- /* We only need to have buf_pool->mutex in case where we end
+ /* We only need to have buf_pool.mutex in case where we end
up calling buf_pool_watch_remove but to obey latching order
we acquire it here before acquiring hash_lock. This should
not cause too much grief as this function is only ever
called from the purge thread. */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
rw_lock_t* hash_lock = buf_page_hash_lock_get(page_id);
rw_lock_x_lock(hash_lock);
@@ -3162,7 +2806,7 @@ void buf_pool_watch_unset(const page_id_t page_id)
buf_pool_watch_remove(bpage);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
rw_lock_x_unlock(hash_lock);
}
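
The watch calls above park a sentinel descriptor in page_hash so the purge thread can later tell whether the page was read in while it was not holding the latches. A toy model of that sentinel idea with an ordinary map (hypothetical types; the real code uses fixed watch[] slots under hash locks):

#include <cassert>
#include <cstdint>
#include <map>

enum class state { watch_sentinel, file_page };

struct page_desc { state s; };

using page_hash= std::map<std::uint64_t, page_desc>;

/* Set a watch: if the page is absent, insert a sentinel and report "not read". */
static bool watch_set(page_hash &h, std::uint64_t page_id)
{
  auto it= h.find(page_id);
  if (it != h.end())
    return it->second.s == state::file_page;  /* already present */
  h.emplace(page_id, page_desc{state::watch_sentinel});
  return false;
}

/* Check whether the watched page appeared, then drop the sentinel. */
static bool watch_occurred_and_unset(page_hash &h, std::uint64_t page_id)
{
  auto it= h.find(page_id);
  bool occurred= it != h.end() && it->second.s == state::file_page;
  if (it != h.end() && it->second.s == state::watch_sentinel)
    h.erase(it);
  return occurred;
}

int main()
{
  page_hash h;
  assert(!watch_set(h, 42));                 /* sentinel installed */
  h[42]= page_desc{state::file_page};        /* a read replaces the sentinel */
  assert(watch_occurred_and_unset(h, 42));   /* purge observes the read */
}
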
@@ -3199,13 +2843,13 @@ the buffer pool.
@param[in,out] bpage buffer block of a file page */
void buf_page_make_young(buf_page_t* bpage)
{
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
ut_a(buf_page_in_file(bpage));
buf_LRU_make_block_young(bpage);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/** Mark the page status as FREED for the given tablespace id and
@@ -3225,7 +2869,7 @@ void buf_page_free(const page_id_t page_id,
{
ut_ad(mtr);
ut_ad(mtr->is_active());
- buf_pool->stat.n_page_gets++;
+ buf_pool.stat.n_page_gets++;
rw_lock_t *hash_lock= buf_page_hash_lock_get(page_id);
rw_lock_s_lock(hash_lock);
@@ -3280,7 +2924,7 @@ static void buf_block_try_discard_uncompressed(const page_id_t page_id)
first release the page_hash mutex. This means that the
block in question can move out of page_hash. Therefore
we need to check again if the block is still in page_hash. */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
bpage = buf_page_hash_get(page_id);
@@ -3288,7 +2932,7 @@ static void buf_block_try_discard_uncompressed(const page_id_t page_id)
buf_LRU_free_page(bpage, false);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/** Get read access to a compressed page (usually of type
@@ -3311,7 +2955,7 @@ buf_page_t* buf_page_get_zip(const page_id_t page_id, ulint zip_size)
ut_ad(zip_size);
ut_ad(ut_is_2pow(zip_size));
- buf_pool->stat.n_page_gets++;
+ buf_pool.stat.n_page_gets++;
for (;;) {
lookup:
@@ -3337,7 +2981,7 @@ lookup:
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- if (!(++buf_dbg_counter % 5771)) buf_validate();
+ if (!(++buf_dbg_counter % 5771)) buf_pool.validate();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
}
@@ -3356,7 +3000,7 @@ err_exit:
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
bpage->fix();
- block_mutex = &buf_pool->zip_mutex;
+ block_mutex = &buf_pool.zip_mutex;
goto got_block;
case BUF_BLOCK_FILE_PAGE:
/* Discard the uncompressed page frame if possible. */
@@ -3394,7 +3038,7 @@ got_block:
buf_page_make_young_if_needed(bpage);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- if (!(++buf_dbg_counter % 5771)) buf_validate();
+ if (!(++buf_dbg_counter % 5771)) buf_pool.validate();
ut_a(bpage->buf_fix_count > 0);
ut_a(buf_page_in_file(bpage));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -3543,86 +3187,6 @@ err_exit:
return(FALSE);
}
-#ifdef BTR_CUR_HASH_ADAPT
-/** Get a buffer block from an adaptive hash index pointer.
-This function does not return if the block is not identified.
-@param[in] ptr pointer to within a page frame
-@return pointer to block, never NULL */
-buf_block_t*
-buf_block_from_ahi(const byte* ptr)
-{
- buf_pool_chunk_map_t::iterator it;
-
- buf_pool_chunk_map_t* chunk_map = buf_chunk_map_ref;
- ut_ad(buf_chunk_map_ref == buf_chunk_map_reg);
- ut_ad(!buf_pool_resizing);
-
- buf_chunk_t* chunk;
- it = chunk_map->upper_bound(ptr);
-
- ut_a(it != chunk_map->begin());
-
- if (it == chunk_map->end()) {
- chunk = chunk_map->rbegin()->second;
- } else {
- chunk = (--it)->second;
- }
-
- ulint offs = ulint(ptr - chunk->blocks->frame);
-
- offs >>= srv_page_size_shift;
-
- ut_a(offs < chunk->size);
-
- buf_block_t* block = &chunk->blocks[offs];
-
- /* The function buf_chunk_init() invokes buf_block_init() so that
- block[n].frame == block->frame + n * srv_page_size. Check it. */
- ut_ad(block->frame == page_align(ptr));
- /* Read the state of the block without holding a mutex.
- A state transition from BUF_BLOCK_FILE_PAGE to
- BUF_BLOCK_REMOVE_HASH is possible during this execution. */
- ut_d(const buf_page_state state = buf_block_get_state(block));
- ut_ad(state == BUF_BLOCK_FILE_PAGE || state == BUF_BLOCK_REMOVE_HASH);
- return(block);
-}
-#endif /* BTR_CUR_HASH_ADAPT */
-
-/** Determine if a pointer belongs to a buf_block_t. It can be a pointer to
-the buf_block_t itself or a member of it.
-@param ptr a pointer that will not be dereferenced
-@return whether the ptr belongs to a buf_block_t struct */
-bool buf_pointer_is_block_field(const void* ptr)
-{
- const buf_chunk_t* chunk = buf_pool->chunks;
- const buf_chunk_t* const echunk = chunk + ut_min(
- buf_pool->n_chunks, buf_pool->n_chunks_new);
-
- /* TODO: protect buf_pool->chunks with a mutex (the older pointer will
- currently remain while during buf_pool_resize()) */
- while (chunk < echunk) {
- if (ptr >= (void*) chunk->blocks
- && ptr < (void*) (chunk->blocks + chunk->size)) {
-
- return true;
- }
-
- chunk++;
- }
-
- return false;
-}
-
-/** Determine if a buffer block was created by buf_chunk_init().
-@param[in] block block descriptor (not dereferenced)
-@return whether block has been added to buf_pool->free by buf_chunk_init() */
-static bool buf_block_is_uncompressed(const buf_block_t* block)
-{
- /* The pointer should be aligned. */
- return !(ulint(block) % sizeof *block) && buf_pointer_is_block_field(
- reinterpret_cast<const void*>(block));
-}
-
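
The removed buf_block_from_ahi() and buf_pointer_is_block_field() both answer the question "which chunk, if any, contains this pointer". The same lookup with std::map::upper_bound over chunk start addresses (hypothetical chunk layout, 16 KiB pages assumed):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>

constexpr std::size_t page_size= 16384;   /* assumed page size */

struct chunk
{
  const std::uint8_t *frames;   /* first page frame of the chunk */
  std::size_t         n_pages;  /* number of frames in the chunk */
};

/* Map from first frame address to its chunk, like the chunk registry. */
using chunk_map= std::map<const std::uint8_t*, const chunk*>;

/* Find the chunk whose frame range contains ptr, or nullptr. */
static const chunk *chunk_containing(const chunk_map &m, const void *ptr)
{
  auto p= static_cast<const std::uint8_t*>(ptr);
  auto it= m.upper_bound(p);          /* first chunk starting after ptr */
  if (it == m.begin())
    return nullptr;                   /* ptr is below every chunk */
  const chunk *c= std::prev(it)->second;
  return p < c->frames + c->n_pages * page_size ? c : nullptr;
}

int main()
{
  static std::uint8_t buf[4 * page_size];
  chunk c{buf, 4};
  chunk_map m{{c.frames, &c}};
  assert(chunk_containing(m, buf + 3 * page_size + 100) == &c);
  assert(chunk_containing(m, buf + 4 * page_size) == nullptr);
}
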
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
/********************************************************************//**
Return true if probe is enabled.
@@ -3759,7 +3323,7 @@ buf_page_get_gen(
ut_ad(!mtr || !ibuf_inside(mtr)
|| ibuf_page_low(page_id, zip_size, FALSE, file, line, NULL));
- buf_pool->stat.n_page_gets++;
+ buf_pool.stat.n_page_gets++;
hash_lock = buf_page_hash_lock_get(page_id);
loop:
block = guess;
@@ -3775,7 +3339,7 @@ loop:
has been allocated by buf_page_alloc_descriptor(),
it may have been freed by buf_relocate(). */
- if (!buf_block_is_uncompressed(block)
+ if (!buf_pool.is_uncompressed(block)
|| page_id != block->page.id
|| buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
/* Our guess was bogus or things have changed
@@ -3929,7 +3493,7 @@ loop:
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- if (!(++buf_dbg_counter % 5771)) buf_validate();
+ if (!(++buf_dbg_counter % 5771)) buf_pool.validate();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
goto loop;
} else {
@@ -3994,14 +3558,14 @@ got_block:
if (UNIV_UNLIKELY(mode == BUF_EVICT_IF_IN_POOL)) {
evict_from_pool:
ut_ad(!fix_block->page.oldest_modification);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
fix_block->unfix();
if (!buf_LRU_free_page(&fix_block->page, true)) {
ut_ad(0);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(NULL);
}
break;
@@ -4049,7 +3613,7 @@ evict_from_pool:
block = buf_LRU_get_free_block();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
hash_lock = buf_page_hash_lock_get(page_id);
@@ -4061,22 +3625,22 @@ evict_from_pool:
fix_block->unfix();
buf_page_mutex_enter(block);
- mutex_enter(&buf_pool->zip_mutex);
+ mutex_enter(&buf_pool.zip_mutex);
fix_block = block;
if (bpage->buf_fix_count > 0
|| buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
- mutex_exit(&buf_pool->zip_mutex);
+ mutex_exit(&buf_pool.zip_mutex);
/* The block was buffer-fixed or I/O-fixed while
- buf_pool->mutex was not held by this thread.
+ buf_pool.mutex was not held by this thread.
Free the block that was allocated and retry.
This should be extremely unlikely, for example,
if buf_page_get_zip() was invoked. */
buf_LRU_block_free_non_file_page(block);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
rw_lock_x_unlock(hash_lock);
buf_page_mutex_exit(block);
@@ -4105,11 +3669,11 @@ evict_from_pool:
if (buf_page_get_state(&block->page) == BUF_BLOCK_ZIP_PAGE) {
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- UT_LIST_REMOVE(buf_pool->zip_clean, &block->page);
+ UT_LIST_REMOVE(buf_pool.zip_clean, &block->page);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
ut_ad(!block->page.in_flush_list);
} else {
- /* Relocate buf_pool->flush_list. */
+ /* Relocate buf_pool.flush_list. */
buf_flush_relocate_on_flush_list(bpage, &block->page);
}
@@ -4127,9 +3691,9 @@ evict_from_pool:
UNIV_MEM_INVALID(bpage, sizeof *bpage);
rw_lock_x_unlock(hash_lock);
- buf_pool->n_pend_unzip++;
- mutex_exit(&buf_pool->zip_mutex);
- mutex_exit(&buf_pool->mutex);
+ buf_pool.n_pend_unzip++;
+ mutex_exit(&buf_pool.zip_mutex);
+ mutex_exit(&buf_pool.mutex);
access_time = buf_page_is_accessed(&block->page);
@@ -4143,16 +3707,16 @@ evict_from_pool:
buf_page_free_descriptor(bpage);
/* Decompress the page while not holding
- buf_pool->mutex or block->mutex. */
+ buf_pool.mutex or block->mutex. */
if (!buf_zip_decompress(block, TRUE)) {
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
buf_page_mutex_enter(fix_block);
buf_block_set_io_fix(fix_block, BUF_IO_NONE);
buf_page_mutex_exit(fix_block);
- --buf_pool->n_pend_unzip;
- mutex_exit(&buf_pool->mutex);
+ --buf_pool.n_pend_unzip;
+ mutex_exit(&buf_pool.mutex);
fix_block->unfix();
rw_lock_x_unlock(&fix_block->lock);
@@ -4162,7 +3726,7 @@ evict_from_pool:
return NULL;
}
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
buf_page_mutex_enter(fix_block);
@@ -4170,9 +3734,9 @@ evict_from_pool:
buf_page_mutex_exit(fix_block);
- --buf_pool->n_pend_unzip;
+ --buf_pool.n_pend_unzip;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
rw_lock_x_unlock(&block->lock);
@@ -4195,18 +3759,18 @@ evict_from_pool:
/* Try to evict the block from the buffer pool, to use the
insert buffer (change buffer) as much as possible. */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
fix_block->unfix();
- /* Now we are only holding the buf_pool->mutex,
+ /* Now we are only holding the buf_pool.mutex,
not block->mutex or hash_lock. Blocks cannot be
relocated or enter or exit the buf_pool while we
- are holding the buf_pool->mutex. */
+ are holding the buf_pool.mutex. */
if (buf_LRU_free_page(&fix_block->page, true)) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* page_hash can be changed. */
hash_lock = buf_page_hash_lock_get(page_id);
@@ -4258,7 +3822,7 @@ evict_from_pool:
/* Failed to evict the page; change it directly */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
@@ -4305,7 +3869,7 @@ evict_from_pool:
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- if (!(++buf_dbg_counter % 5771)) buf_validate();
+ if (!(++buf_dbg_counter % 5771)) buf_pool.validate();
ut_a(buf_block_get_state(fix_block) == BUF_BLOCK_FILE_PAGE);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -4471,7 +4035,7 @@ buf_page_optimistic_get(
mtr_memo_push(mtr, block, fix_type);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- if (!(++buf_dbg_counter % 5771)) buf_validate();
+ if (!(++buf_dbg_counter % 5771)) buf_pool.validate();
ut_a(block->page.buf_fix_count > 0);
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -4483,7 +4047,7 @@ buf_page_optimistic_get(
ibuf_inside(mtr));
}
- buf_pool->stat.n_page_gets++;
+ buf_pool.stat.n_page_gets++;
return(TRUE);
}
@@ -4553,14 +4117,14 @@ buf_page_try_get_func(
mtr_memo_push(mtr, block, fix_type);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- if (!(++buf_dbg_counter % 5771)) buf_validate();
+ if (!(++buf_dbg_counter % 5771)) buf_pool.validate();
ut_a(block->page.buf_fix_count > 0);
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
- buf_pool->stat.n_page_gets++;
+ buf_pool.stat.n_page_gets++;
return(block);
}
@@ -4597,7 +4161,7 @@ static void buf_page_init(const page_id_t page_id, ulint zip_size,
{
buf_page_t* hash_page;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_mutex_own(block));
ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
ut_ad(rw_lock_own(buf_page_hash_lock_get(page_id), RW_LOCK_X));
@@ -4643,10 +4207,9 @@ static void buf_page_init(const page_id_t page_id, ulint zip_size,
<< hash_page << ", " << block;
ut_d(buf_page_mutex_exit(block));
- ut_d(mutex_exit(&buf_pool->mutex));
- ut_d(buf_print());
+ ut_d(mutex_exit(&buf_pool.mutex));
+ ut_d(buf_pool.print());
ut_d(buf_LRU_print());
- ut_d(buf_validate());
ut_d(buf_LRU_validate());
ut_error;
}
@@ -4657,7 +4220,7 @@ static void buf_page_init(const page_id_t page_id, ulint zip_size,
block->page.id = page_id;
- HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
+ HASH_INSERT(buf_page_t, hash, buf_pool.page_hash,
page_id.fold(), &block->page);
page_zip_set_size(&block->page.zip, zip_size);
@@ -4722,7 +4285,7 @@ buf_page_init_for_read(
ut_ad(block);
}
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
hash_lock = buf_page_hash_lock_get(page_id);
rw_lock_x_lock(hash_lock);
@@ -4772,14 +4335,14 @@ buf_page_init_for_read(
rw_lock_x_lock_gen(&block->lock, BUF_IO_READ);
if (zip_size) {
- /* buf_pool->mutex may be released and
+ /* buf_pool.mutex may be released and
reacquired by buf_buddy_alloc(). Thus, we
must release block->mutex in order not to
break the latching order in the reacquisition
- of buf_pool->mutex. We also must defer this
+ of buf_pool.mutex. We also must defer this
operation until after the block descriptor has
- been added to buf_pool->LRU and
- buf_pool->page_hash. */
+ been added to buf_pool.LRU and
+ buf_pool.page_hash. */
buf_page_mutex_exit(block);
data = buf_buddy_alloc(zip_size, &lru);
buf_page_mutex_enter(block);
@@ -4807,7 +4370,7 @@ buf_page_init_for_read(
rw_lock_x_lock(hash_lock);
/* If buf_buddy_alloc() allocated storage from the LRU list,
- it released and reacquired buf_pool->mutex. Thus, we must
+ it released and reacquired buf_pool.mutex. Thus, we must
check the page_hash again, as it may have been modified. */
if (UNIV_UNLIKELY(lru)) {
watch_page = buf_page_hash_get_low(page_id);
@@ -4831,7 +4394,7 @@ buf_page_init_for_read(
page_zip_set_size(&bpage->zip, zip_size);
bpage->zip.data = (page_zip_t*) data;
- mutex_enter(&buf_pool->zip_mutex);
+ mutex_enter(&buf_pool.zip_mutex);
UNIV_MEM_DESC(bpage->zip.data, zip_size);
buf_page_init_low(bpage);
@@ -4863,7 +4426,7 @@ buf_page_init_for_read(
buf_pool_watch_remove(watch_page);
}
- HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
+ HASH_INSERT(buf_page_t, hash, buf_pool.page_hash,
bpage->id.fold(), bpage);
rw_lock_x_unlock(hash_lock);
@@ -4877,12 +4440,12 @@ buf_page_init_for_read(
buf_page_set_io_fix(bpage, BUF_IO_READ);
- mutex_exit(&buf_pool->zip_mutex);
+ mutex_exit(&buf_pool.zip_mutex);
}
- buf_pool->n_pend_reads++;
+ buf_pool.n_pend_reads++;
func_exit:
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (mode == BUF_READ_IBUF_PAGES_ONLY) {
@@ -4920,7 +4483,7 @@ buf_page_create(
free_block = buf_LRU_get_free_block();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
hash_lock = buf_page_hash_lock_get(page_id);
rw_lock_x_lock(hash_lock);
@@ -4931,7 +4494,7 @@ buf_page_create(
&& buf_page_in_file(&block->page)
&& !buf_pool_watch_is_sentinel(&block->page)) {
/* Page can be found in buf_pool */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
rw_lock_x_unlock(hash_lock);
buf_block_free(free_block);
@@ -4969,23 +4532,23 @@ buf_page_create(
buf_LRU_add_block(&block->page, FALSE);
buf_block_buf_fix_inc(block, __FILE__, __LINE__);
- buf_pool->stat.n_pages_created++;
+ buf_pool.stat.n_pages_created++;
if (zip_size) {
/* Prevent race conditions during buf_buddy_alloc(),
- which may release and reacquire buf_pool->mutex,
+ which may release and reacquire buf_pool.mutex,
by IO-fixing and X-latching the block. */
buf_page_set_io_fix(&block->page, BUF_IO_READ);
rw_lock_x_lock(&block->lock);
buf_page_mutex_exit(block);
- /* buf_pool->mutex may be released and reacquired by
+ /* buf_pool.mutex may be released and reacquired by
buf_buddy_alloc(). Thus, we must release block->mutex
in order not to break the latching order in
- the reacquisition of buf_pool->mutex. We also must
+ the reacquisition of buf_pool.mutex. We also must
defer this operation until after the block descriptor
- has been added to buf_pool->LRU and buf_pool->page_hash. */
+ has been added to buf_pool.LRU and buf_pool.page_hash. */
block->page.zip.data = buf_buddy_alloc(zip_size);
buf_page_mutex_enter(block);
@@ -5001,7 +4564,7 @@ buf_page_create(
rw_lock_x_unlock(&block->lock);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
@@ -5033,7 +4596,7 @@ buf_page_create(
memset_aligned<8>(frame + FIL_PAGE_LSN, 0, 8);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- if (!(++buf_dbg_counter % 5771)) buf_validate();
+ if (!(++buf_dbg_counter % 5771)) buf_pool.validate();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
return(block);
}
@@ -5176,7 +4739,7 @@ buf_corrupt_page_release(buf_page_t* bpage, const fil_space_t* space)
page_id_t old_page_id = bpage->id;
/* First unfix and release lock on the bpage */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
mutex_enter(buf_page_get_mutex(bpage));
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_READ);
ut_ad(bpage->id.space() == space->id);
@@ -5203,10 +4766,10 @@ buf_corrupt_page_release(buf_page_t* bpage, const fil_space_t* space)
/* After this point bpage can't be referenced. */
buf_LRU_free_one_page(bpage, old_page_id);
- ut_ad(buf_pool->n_pend_reads > 0);
- buf_pool->n_pend_reads--;
+ ut_ad(buf_pool.n_pend_reads > 0);
+ buf_pool.n_pend_reads--;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/** Check if the encrypted page is corrupted for the full crc32 format.
@@ -5352,10 +4915,10 @@ buf_page_io_complete(buf_page_t* bpage, bool dblwr, bool evict)
}
if (bpage->zip.data && uncompressed) {
- buf_pool->n_pend_unzip++;
+ buf_pool.n_pend_unzip++;
ibool ok = buf_zip_decompress((buf_block_t*) bpage,
FALSE);
- buf_pool->n_pend_unzip--;
+ buf_pool.n_pend_unzip--;
if (!ok) {
ib::info() << "Page "
@@ -5513,7 +5076,7 @@ release_page:
}
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
mutex_enter(block_mutex);
/* Because this thread which does the unlocking is not the same that
@@ -5529,9 +5092,9 @@ release_page:
the x-latch to this OS thread: do not let this confuse you in
debugging! */
- ut_ad(buf_pool->n_pend_reads > 0);
- buf_pool->n_pend_reads--;
- buf_pool->stat.n_pages_read++;
+ ut_ad(buf_pool.n_pend_reads > 0);
+ buf_pool.n_pend_reads--;
+ buf_pool.stat.n_pages_read++;
if (uncompressed) {
rw_lock_x_unlock_gen(&((buf_block_t*) bpage)->lock,
@@ -5550,7 +5113,7 @@ release_page:
BUF_IO_WRITE);
}
- buf_pool->stat.n_pages_written++;
+ buf_pool.stat.n_pages_written++;
/* We decide whether or not to evict the page from the
LRU list based on the flush_type.
@@ -5572,39 +5135,37 @@ release_page:
DBUG_PRINT("ib_buf", ("%s page %u:%u",
io_type == BUF_IO_READ ? "read" : "wrote",
bpage->id.space(), bpage->id.page_no()));
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return DB_SUCCESS;
}
-/** Assert that all buffer pool pages are in a replaceable state */
-void buf_assert_all_freed()
+#ifdef UNIV_DEBUG
+/** Assert that all blocks are in a replaceable state,
+i.e. neither buffer-fixed, I/O-fixed nor dirty. */
+void buf_pool_t::assert_all_freed()
{
- mutex_enter(&buf_pool->mutex);
- buf_chunk_t* chunk = buf_pool->chunks;
-
- for (ulint i = buf_pool->n_chunks; i--; chunk++) {
-
- if (const buf_block_t* block = buf_chunk_not_freed(chunk)) {
- ib::fatal() << "Page " << block->page.id
- << " still fixed or dirty";
- }
- }
-
- mutex_exit(&buf_pool->mutex);
+ mutex_enter(&mutex);
+ const chunk_t *chunk= chunks;
+ for (auto i= n_chunks; i--; chunk++)
+ if (const buf_block_t* block= chunk->not_freed())
+ ib::fatal() << "Page " << block->page.id << " still fixed or dirty";
+ mutex_exit(&mutex);
}
+#endif /* UNIV_DEBUG */
/** Refresh the statistics used to print per-second averages. */
void buf_refresh_io_stats()
{
- buf_pool->last_printout_time = time(NULL);
- buf_pool->old_stat = buf_pool->stat;
+ buf_pool.last_printout_time = time(NULL);
+ buf_pool.old_stat = buf_pool.stat;
}
/** Invalidate all pages in the buffer pool.
All pages must be in a replaceable state (not modified or latched). */
void buf_pool_invalidate()
{
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
for (unsigned i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
@@ -5613,45 +5174,45 @@ void buf_pool_invalidate()
is single threaded (apart from IO helper threads) at
	this stage. No new write batch can be in initialization
stage at this point. */
- ut_ad(!buf_pool->init_flush[i]);
+ ut_ad(!buf_pool.init_flush[i]);
/* However, it is possible that a write batch that has
been posted earlier is still not complete. For buffer
pool invalidation to proceed we must ensure there is NO
write activity happening. */
- if (buf_pool->n_flush[i] > 0) {
+ if (buf_pool.n_flush[i] > 0) {
buf_flush_t type = buf_flush_t(i);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
buf_flush_wait_batch_end(type);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
}
}
- ut_d(mutex_exit(&buf_pool->mutex));
- ut_d(buf_assert_all_freed());
- ut_d(mutex_enter(&buf_pool->mutex));
+ ut_d(mutex_exit(&buf_pool.mutex));
+ ut_d(buf_pool.assert_all_freed());
+ ut_d(mutex_enter(&buf_pool.mutex));
while (buf_LRU_scan_and_free_block(true));
- ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
- ut_ad(UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0);
+ ut_ad(UT_LIST_GET_LEN(buf_pool.LRU) == 0);
+ ut_ad(UT_LIST_GET_LEN(buf_pool.unzip_LRU) == 0);
- buf_pool->freed_page_clock = 0;
- buf_pool->LRU_old = NULL;
- buf_pool->LRU_old_len = 0;
+ buf_pool.freed_page_clock = 0;
+ buf_pool.LRU_old = NULL;
+ buf_pool.LRU_old_len = 0;
- memset(&buf_pool->stat, 0x00, sizeof(buf_pool->stat));
+ memset(&buf_pool.stat, 0x00, sizeof(buf_pool.stat));
buf_refresh_io_stats();
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/** Validate the buffer pool. */
-void buf_validate()
+void buf_pool_t::validate()
{
buf_page_t* b;
- buf_chunk_t* chunk;
+ buf_pool_t::chunk_t* chunk;
ulint i;
ulint n_lru_flush = 0;
ulint n_page_flush = 0;
@@ -5661,14 +5222,14 @@ void buf_validate()
ulint n_free = 0;
ulint n_zip = 0;
- mutex_enter(&buf_pool->mutex);
- hash_lock_x_all(buf_pool->page_hash);
+ mutex_enter(&buf_pool.mutex);
+ hash_lock_x_all(buf_pool.page_hash);
- chunk = buf_pool->chunks;
+ chunk = buf_pool.chunks;
/* Check the uncompressed blocks. */
- for (i = buf_pool->n_chunks; i--; chunk++) {
+ for (i = buf_pool.n_chunks; i--; chunk++) {
ulint j;
buf_block_t* block = chunk->blocks;
@@ -5743,11 +5304,11 @@ assert_s_latched:
}
}
- mutex_enter(&buf_pool->zip_mutex);
+ mutex_enter(&buf_pool.zip_mutex);
/* Check clean compressed-only blocks. */
- for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
+ for (b = UT_LIST_GET_FIRST(buf_pool.zip_clean); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_ad(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
switch (buf_page_get_io_fix(b)) {
@@ -5767,7 +5328,7 @@ assert_s_latched:
}
/* It is OK to read oldest_modification here because
- we have acquired buf_pool->zip_mutex above which acts
+ we have acquired buf_pool.zip_mutex above which acts
as the 'block->mutex' for these bpages. */
ut_ad(!b->oldest_modification);
ut_ad(buf_page_hash_get_low(b->id) == b);
@@ -5777,8 +5338,8 @@ assert_s_latched:
/* Check dirty blocks. */
- mutex_enter(&buf_pool->flush_list_mutex);
- for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
+ mutex_enter(&buf_pool.flush_list_mutex);
+ for (b = UT_LIST_GET_FIRST(buf_pool.flush_list); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_ad(b->in_flush_list);
ut_ad(b->oldest_modification);
@@ -5825,36 +5386,36 @@ assert_s_latched:
ut_ad(buf_page_hash_get_low(b->id) == b);
}
- ut_ad(UT_LIST_GET_LEN(buf_pool->flush_list) == n_flush);
+ ut_ad(UT_LIST_GET_LEN(buf_pool.flush_list) == n_flush);
- hash_unlock_x_all(buf_pool->page_hash);
- mutex_exit(&buf_pool->flush_list_mutex);
+ hash_unlock_x_all(buf_pool.page_hash);
+ mutex_exit(&buf_pool.flush_list_mutex);
- mutex_exit(&buf_pool->zip_mutex);
+ mutex_exit(&buf_pool.zip_mutex);
- if (buf_pool->curr_size == buf_pool->old_size
- && n_lru + n_free > buf_pool->curr_size + n_zip) {
+ if (buf_pool.curr_size == buf_pool.old_size
+ && n_lru + n_free > buf_pool.curr_size + n_zip) {
ib::fatal() << "n_LRU " << n_lru << ", n_free " << n_free
- << ", pool " << buf_pool->curr_size
+ << ", pool " << buf_pool.curr_size
<< " zip " << n_zip << ". Aborting...";
}
- ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == n_lru);
+ ut_ad(UT_LIST_GET_LEN(buf_pool.LRU) == n_lru);
- if (buf_pool->curr_size == buf_pool->old_size
- && UT_LIST_GET_LEN(buf_pool->free) != n_free) {
+ if (buf_pool.curr_size == buf_pool.old_size
+ && UT_LIST_GET_LEN(buf_pool.free) != n_free) {
ib::fatal() << "Free list len "
- << UT_LIST_GET_LEN(buf_pool->free)
+ << UT_LIST_GET_LEN(buf_pool.free)
<< ", free blocks " << n_free << ". Aborting...";
}
- ut_ad(buf_pool->n_flush[BUF_FLUSH_LIST] == n_list_flush);
- ut_ad(buf_pool->n_flush[BUF_FLUSH_LRU] == n_lru_flush);
- ut_ad(buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE] == n_page_flush);
+ ut_ad(buf_pool.n_flush[BUF_FLUSH_LIST] == n_list_flush);
+ ut_ad(buf_pool.n_flush[BUF_FLUSH_LRU] == n_lru_flush);
+ ut_ad(buf_pool.n_flush[BUF_FLUSH_SINGLE_PAGE] == n_page_flush);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
ut_d(buf_LRU_validate());
ut_d(buf_flush_validate());
@@ -5863,7 +5424,7 @@ assert_s_latched:
#if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/** Write information of the buf_pool to the error log. */
-void buf_print()
+void buf_pool_t::print()
{
index_id_t* index_ids;
ulint* counts;
@@ -5872,45 +5433,45 @@ void buf_print()
ulint j;
index_id_t id;
ulint n_found;
- buf_chunk_t* chunk;
+ buf_pool_t::chunk_t* chunk;
dict_index_t* index;
- size = buf_pool->curr_size;
+ size = curr_size;
index_ids = static_cast<index_id_t*>(
ut_malloc_nokey(size * sizeof *index_ids));
counts = static_cast<ulint*>(ut_malloc_nokey(sizeof(ulint) * size));
- mutex_enter(&buf_pool->mutex);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&mutex);
+ mutex_enter(&flush_list_mutex);
ib::info()
- << "[buffer pool: size=" << buf_pool->curr_size
- << ", database pages=" << UT_LIST_GET_LEN(buf_pool->LRU)
- << ", free pages=" << UT_LIST_GET_LEN(buf_pool->free)
+ << "[buffer pool: size=" << curr_size
+ << ", database pages=" << UT_LIST_GET_LEN(LRU)
+ << ", free pages=" << UT_LIST_GET_LEN(free)
<< ", modified database pages="
- << UT_LIST_GET_LEN(buf_pool->flush_list)
- << ", n pending decompressions=" << buf_pool->n_pend_unzip
- << ", n pending reads=" << buf_pool->n_pend_reads
- << ", n pending flush LRU=" << buf_pool->n_flush[BUF_FLUSH_LRU]
- << " list=" << buf_pool->n_flush[BUF_FLUSH_LIST]
- << " single page=" << buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]
- << ", pages made young=" << buf_pool->stat.n_pages_made_young
- << ", not young=" << buf_pool->stat.n_pages_not_made_young
- << ", pages read=" << buf_pool->stat.n_pages_read
- << ", created=" << buf_pool->stat.n_pages_created
- << ", written=" << buf_pool->stat.n_pages_written << "]";
-
- mutex_exit(&buf_pool->flush_list_mutex);
+ << UT_LIST_GET_LEN(flush_list)
+ << ", n pending decompressions=" << n_pend_unzip
+ << ", n pending reads=" << n_pend_reads
+ << ", n pending flush LRU=" << n_flush[BUF_FLUSH_LRU]
+ << " list=" << n_flush[BUF_FLUSH_LIST]
+ << " single page=" << n_flush[BUF_FLUSH_SINGLE_PAGE]
+ << ", pages made young=" << stat.n_pages_made_young
+ << ", not young=" << stat.n_pages_not_made_young
+ << ", pages read=" << stat.n_pages_read
+ << ", created=" << stat.n_pages_created
+ << ", written=" << stat.n_pages_written << "]";
+
+ mutex_exit(&flush_list_mutex);
/* Count the number of blocks belonging to each index in the buffer */
n_found = 0;
- chunk = buf_pool->chunks;
+ chunk = chunks;
- for (i = buf_pool->n_chunks; i--; chunk++) {
+ for (i = n_chunks; i--; chunk++) {
buf_block_t* block = chunk->blocks;
ulint n_blocks = chunk->size;
@@ -5943,7 +5504,7 @@ void buf_print()
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&mutex);
for (i = 0; i < n_found; i++) {
index = dict_index_get_if_in_cache(index_ids[i]);
@@ -5963,7 +5524,7 @@ void buf_print()
ut_free(index_ids);
ut_free(counts);
- buf_validate();
+ validate();
}
#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -5973,14 +5534,13 @@ ulint buf_get_latched_pages_number()
{
buf_page_t* b;
ulint i;
- buf_chunk_t* chunk;
ulint fixed_pages_number = 0;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- chunk = buf_pool->chunks;
+ auto chunk = buf_pool.chunks;
- for (i = buf_pool->n_chunks; i--; chunk++) {
+ for (i = buf_pool.n_chunks; i--; chunk++) {
buf_block_t* block;
ulint j;
@@ -6005,11 +5565,11 @@ ulint buf_get_latched_pages_number()
}
}
- mutex_enter(&buf_pool->zip_mutex);
+ mutex_enter(&buf_pool.zip_mutex);
/* Traverse the lists of clean and dirty compressed-only blocks. */
- for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
+ for (b = UT_LIST_GET_FIRST(buf_pool.zip_clean); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
ut_a(buf_page_get_io_fix(b) != BUF_IO_WRITE);
@@ -6020,8 +5580,8 @@ ulint buf_get_latched_pages_number()
}
}
- mutex_enter(&buf_pool->flush_list_mutex);
- for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
+ mutex_enter(&buf_pool.flush_list_mutex);
+ for (b = UT_LIST_GET_FIRST(buf_pool.flush_list); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_ad(b->in_flush_list);
@@ -6046,9 +5606,9 @@ ulint buf_get_latched_pages_number()
}
}
- mutex_exit(&buf_pool->flush_list_mutex);
- mutex_exit(&buf_pool->zip_mutex);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
+ mutex_exit(&buf_pool.zip_mutex);
+ mutex_exit(&buf_pool.mutex);
return(fixed_pages_number);
}
@@ -6061,116 +5621,116 @@ void buf_stats_get_pool_info(buf_pool_info_t *pool_info)
time_t current_time;
double time_elapsed;
- mutex_enter(&buf_pool->mutex);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
- pool_info->pool_size = buf_pool->curr_size;
+ pool_info->pool_size = buf_pool.curr_size;
- pool_info->lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
+ pool_info->lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
- pool_info->old_lru_len = buf_pool->LRU_old_len;
+ pool_info->old_lru_len = buf_pool.LRU_old_len;
- pool_info->free_list_len = UT_LIST_GET_LEN(buf_pool->free);
+ pool_info->free_list_len = UT_LIST_GET_LEN(buf_pool.free);
- pool_info->flush_list_len = UT_LIST_GET_LEN(buf_pool->flush_list);
+ pool_info->flush_list_len = UT_LIST_GET_LEN(buf_pool.flush_list);
- pool_info->n_pend_unzip = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
+ pool_info->n_pend_unzip = UT_LIST_GET_LEN(buf_pool.unzip_LRU);
- pool_info->n_pend_reads = buf_pool->n_pend_reads;
+ pool_info->n_pend_reads = buf_pool.n_pend_reads;
pool_info->n_pending_flush_lru =
- (buf_pool->n_flush[BUF_FLUSH_LRU]
- + buf_pool->init_flush[BUF_FLUSH_LRU]);
+ (buf_pool.n_flush[BUF_FLUSH_LRU]
+ + buf_pool.init_flush[BUF_FLUSH_LRU]);
pool_info->n_pending_flush_list =
- (buf_pool->n_flush[BUF_FLUSH_LIST]
- + buf_pool->init_flush[BUF_FLUSH_LIST]);
+ (buf_pool.n_flush[BUF_FLUSH_LIST]
+ + buf_pool.init_flush[BUF_FLUSH_LIST]);
pool_info->n_pending_flush_single_page =
- (buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]
- + buf_pool->init_flush[BUF_FLUSH_SINGLE_PAGE]);
+ (buf_pool.n_flush[BUF_FLUSH_SINGLE_PAGE]
+ + buf_pool.init_flush[BUF_FLUSH_SINGLE_PAGE]);
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
current_time = time(NULL);
time_elapsed = 0.001 + difftime(current_time,
- buf_pool->last_printout_time);
+ buf_pool.last_printout_time);
- pool_info->n_pages_made_young = buf_pool->stat.n_pages_made_young;
+ pool_info->n_pages_made_young = buf_pool.stat.n_pages_made_young;
pool_info->n_pages_not_made_young =
- buf_pool->stat.n_pages_not_made_young;
+ buf_pool.stat.n_pages_not_made_young;
- pool_info->n_pages_read = buf_pool->stat.n_pages_read;
+ pool_info->n_pages_read = buf_pool.stat.n_pages_read;
- pool_info->n_pages_created = buf_pool->stat.n_pages_created;
+ pool_info->n_pages_created = buf_pool.stat.n_pages_created;
- pool_info->n_pages_written = buf_pool->stat.n_pages_written;
+ pool_info->n_pages_written = buf_pool.stat.n_pages_written;
- pool_info->n_page_gets = buf_pool->stat.n_page_gets;
+ pool_info->n_page_gets = buf_pool.stat.n_page_gets;
- pool_info->n_ra_pages_read_rnd = buf_pool->stat.n_ra_pages_read_rnd;
- pool_info->n_ra_pages_read = buf_pool->stat.n_ra_pages_read;
+ pool_info->n_ra_pages_read_rnd = buf_pool.stat.n_ra_pages_read_rnd;
+ pool_info->n_ra_pages_read = buf_pool.stat.n_ra_pages_read;
- pool_info->n_ra_pages_evicted = buf_pool->stat.n_ra_pages_evicted;
+ pool_info->n_ra_pages_evicted = buf_pool.stat.n_ra_pages_evicted;
pool_info->page_made_young_rate =
- static_cast<double>(buf_pool->stat.n_pages_made_young
- - buf_pool->old_stat.n_pages_made_young)
+ static_cast<double>(buf_pool.stat.n_pages_made_young
+ - buf_pool.old_stat.n_pages_made_young)
/ time_elapsed;
pool_info->page_not_made_young_rate =
- static_cast<double>(buf_pool->stat.n_pages_not_made_young
- - buf_pool->old_stat.n_pages_not_made_young)
+ static_cast<double>(buf_pool.stat.n_pages_not_made_young
+ - buf_pool.old_stat.n_pages_not_made_young)
/ time_elapsed;
pool_info->pages_read_rate =
- static_cast<double>(buf_pool->stat.n_pages_read
- - buf_pool->old_stat.n_pages_read)
+ static_cast<double>(buf_pool.stat.n_pages_read
+ - buf_pool.old_stat.n_pages_read)
/ time_elapsed;
pool_info->pages_created_rate =
- static_cast<double>(buf_pool->stat.n_pages_created
- - buf_pool->old_stat.n_pages_created)
+ static_cast<double>(buf_pool.stat.n_pages_created
+ - buf_pool.old_stat.n_pages_created)
/ time_elapsed;
pool_info->pages_written_rate =
- static_cast<double>(buf_pool->stat.n_pages_written
- - buf_pool->old_stat.n_pages_written)
+ static_cast<double>(buf_pool.stat.n_pages_written
+ - buf_pool.old_stat.n_pages_written)
/ time_elapsed;
- pool_info->n_page_get_delta = buf_pool->stat.n_page_gets
- - buf_pool->old_stat.n_page_gets;
+ pool_info->n_page_get_delta = buf_pool.stat.n_page_gets
+ - buf_pool.old_stat.n_page_gets;
if (pool_info->n_page_get_delta) {
- pool_info->page_read_delta = buf_pool->stat.n_pages_read
- - buf_pool->old_stat.n_pages_read;
+ pool_info->page_read_delta = buf_pool.stat.n_pages_read
+ - buf_pool.old_stat.n_pages_read;
pool_info->young_making_delta =
- buf_pool->stat.n_pages_made_young
- - buf_pool->old_stat.n_pages_made_young;
+ buf_pool.stat.n_pages_made_young
+ - buf_pool.old_stat.n_pages_made_young;
pool_info->not_young_making_delta =
- buf_pool->stat.n_pages_not_made_young
- - buf_pool->old_stat.n_pages_not_made_young;
+ buf_pool.stat.n_pages_not_made_young
+ - buf_pool.old_stat.n_pages_not_made_young;
}
pool_info->pages_readahead_rnd_rate =
- static_cast<double>(buf_pool->stat.n_ra_pages_read_rnd
- - buf_pool->old_stat.n_ra_pages_read_rnd)
+ static_cast<double>(buf_pool.stat.n_ra_pages_read_rnd
+ - buf_pool.old_stat.n_ra_pages_read_rnd)
/ time_elapsed;
pool_info->pages_readahead_rate =
- static_cast<double>(buf_pool->stat.n_ra_pages_read
- - buf_pool->old_stat.n_ra_pages_read)
+ static_cast<double>(buf_pool.stat.n_ra_pages_read
+ - buf_pool.old_stat.n_ra_pages_read)
/ time_elapsed;
pool_info->pages_evicted_rate =
- static_cast<double>(buf_pool->stat.n_ra_pages_evicted
- - buf_pool->old_stat.n_ra_pages_evicted)
+ static_cast<double>(buf_pool.stat.n_ra_pages_evicted
+ - buf_pool.old_stat.n_ra_pages_evicted)
/ time_elapsed;
- pool_info->unzip_lru_len = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
+ pool_info->unzip_lru_len = UT_LIST_GET_LEN(buf_pool.unzip_LRU);
pool_info->io_sum = buf_LRU_stat_sum.io;
@@ -6181,7 +5741,7 @@ void buf_stats_get_pool_info(buf_pool_info_t *pool_info)
pool_info->unzip_cur = buf_LRU_stat_cur.unzip;
buf_refresh_io_stats();
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/*********************************************************************//**
@@ -6315,13 +5875,13 @@ bool buf_page_verify_crypt_checksum(const byte* page, ulint fsp_flags)
ulint buf_pool_check_no_pending_io()
{
/* FIXME: use atomics, no mutex */
- ulint pending_io = buf_pool->n_pend_reads;
- mutex_enter(&buf_pool->mutex);
+ ulint pending_io = buf_pool.n_pend_reads;
+ mutex_enter(&buf_pool.mutex);
pending_io +=
- + buf_pool->n_flush[BUF_FLUSH_LRU]
- + buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]
- + buf_pool->n_flush[BUF_FLUSH_LIST];
- mutex_exit(&buf_pool->mutex);
+ + buf_pool.n_flush[BUF_FLUSH_LRU]
+ + buf_pool.n_flush[BUF_FLUSH_SINGLE_PAGE]
+ + buf_pool.n_flush[BUF_FLUSH_LIST];
+ mutex_exit(&buf_pool.mutex);
return(pending_io);
}
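The buf0buf.cc hunks above are largely mechanical: with the pool allocated statically, every buf_pool->member access becomes buf_pool.member, and several free functions that only touched pool internals (buf_validate(), buf_print(), buf_pool_get_n_pages(), ...) become buf_pool_t member functions, dropping one level of pointer indirection. A minimal sketch of that conversion, using a made-up pool_t rather than the real buf_pool_t layout:

// Sketch only: hypothetical, simplified stand-in for buf_pool_t.
#include <cassert>
#include <cstddef>

struct pool_t
{
  size_t curr_size= 0;
  bool   created= false;

  void create(size_t n_pages) { curr_size= n_pages; created= true; } // delayed 'constructor'
  void close() { created= false; }                                   // early 'destructor'
  bool validate() const { return !created || curr_size != 0; }       // was a free function
};

/* One statically allocated instance: callers reach members with '.'
   instead of dereferencing a global pool_t* as before. */
static pool_t pool;

int main()
{
  pool.create(1024);
  assert(pool.validate());
  pool.close();
}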
diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc
index 2b3e5528cfc..f7b1d05854f 100644
--- a/storage/innobase/buf/buf0dump.cc
+++ b/storage/innobase/buf/buf0dump.cc
@@ -288,13 +288,13 @@ buf_dump(
ulint n_pages;
ulint j;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- n_pages = UT_LIST_GET_LEN(buf_pool->LRU);
+ n_pages = UT_LIST_GET_LEN(buf_pool.LRU);
/* skip empty buffer pools */
if (n_pages == 0) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
goto done;
}
@@ -303,7 +303,7 @@ buf_dump(
/* limit the number of total pages dumped to X% of the
total number of pages */
- t_pages = buf_pool->curr_size * srv_buf_pool_dump_pct / 100;
+ t_pages = buf_pool.curr_size * srv_buf_pool_dump_pct / 100;
if (n_pages > t_pages) {
buf_dump_status(STATUS_INFO,
"Restricted to " ULINTPF
@@ -322,7 +322,7 @@ buf_dump(
n_pages * sizeof(*dump)));
if (dump == NULL) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
fclose(f);
buf_dump_status(STATUS_ERR,
"Cannot allocate " ULINTPF " bytes: %s",
@@ -332,7 +332,7 @@ buf_dump(
return;
}
- for (bpage = UT_LIST_GET_FIRST(buf_pool->LRU), j = 0;
+ for (bpage = UT_LIST_GET_FIRST(buf_pool.LRU), j = 0;
bpage != NULL && j < n_pages;
bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
@@ -347,7 +347,7 @@ buf_dump(
bpage->id.page_no());
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
ut_a(j <= n_pages);
n_pages = j;
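buf_dump() keeps its logic under the new naming: it walks buf_pool.LRU while holding buf_pool.mutex and caps the number of dumped pages at srv_buf_pool_dump_pct percent of buf_pool.curr_size. A rough illustration of that cap with invented sizes (only the arithmetic mirrors the patch):

#include <algorithm>
#include <cstdio>

int main()
{
  /* Assumed example values; the pool and LRU lengths are made up. */
  unsigned long curr_size= 8192;   // buf_pool.curr_size, in pages
  unsigned long dump_pct= 25;      // srv_buf_pool_dump_pct
  unsigned long lru_len= 6000;     // UT_LIST_GET_LEN(buf_pool.LRU)

  unsigned long t_pages= curr_size * dump_pct / 100;   // dump at most this many
  unsigned long n_pages= std::min(lru_len, t_pages);

  std::printf("dumping %lu of %lu LRU pages\n", n_pages, lru_len); // 2048 of 6000
}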
@@ -550,7 +550,7 @@ buf_load()
	/* If the dump is larger than the buffer pool(s), then we ignore the
	extra trailing entries. This could happen if a dump is made, then the
	buffer pool is shrunk and then a load is attempted. */

- dump_n = std::min(dump_n, buf_pool_get_n_pages());
+ dump_n = std::min(dump_n, buf_pool.get_n_pages());
if (dump_n != 0) {
dump = static_cast<buf_dump_t*>(ut_malloc_nokey(
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 1b1c85fd647..741cc5d8eb1 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -204,9 +204,9 @@ in thrashing. */
static inline void incr_flush_list_size_in_bytes(const buf_block_t* block)
{
/* FIXME: use std::atomic! */
- ut_ad(mutex_own(&buf_pool->flush_list_mutex));
- buf_pool->stat.flush_list_bytes += block->physical_size();
- ut_ad(buf_pool->stat.flush_list_bytes <= buf_pool->curr_pool_size);
+ ut_ad(mutex_own(&buf_pool.flush_list_mutex));
+ buf_pool.stat.flush_list_bytes += block->physical_size();
+ ut_ad(buf_pool.stat.flush_list_bytes <= buf_pool.curr_pool_size);
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@@ -252,14 +252,14 @@ buf_flush_insert_in_flush_rbt(
buf_page_t* prev = NULL;
ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
- ut_ad(mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(mutex_own(&buf_pool.flush_list_mutex));
/* Insert this buffer into the rbt. */
- c_node = rbt_insert(buf_pool->flush_rbt, &bpage, &bpage);
+ c_node = rbt_insert(buf_pool.flush_rbt, &bpage, &bpage);
ut_a(c_node != NULL);
/* Get the predecessor. */
- p_node = rbt_prev(buf_pool->flush_rbt, c_node);
+ p_node = rbt_prev(buf_pool.flush_rbt, c_node);
if (p_node != NULL) {
buf_page_t** value;
@@ -279,12 +279,12 @@ buf_flush_delete_from_flush_rbt(
/*============================*/
buf_page_t* bpage) /*!< in: bpage to be removed. */
{
- ut_ad(mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(mutex_own(&buf_pool.flush_list_mutex));
#ifdef UNIV_DEBUG
ibool ret =
#endif /* UNIV_DEBUG */
- rbt_delete(buf_pool->flush_rbt, &bpage);
+ rbt_delete(buf_pool.flush_rbt, &bpage);
ut_ad(ret);
}
@@ -294,7 +294,7 @@ Compare two modified blocks in the buffer pool. The key for comparison
is:
key = <oldest_modification, space, offset>
This comparison is used to maintain ordering of blocks in the
-buf_pool->flush_rbt.
+buf_pool.flush_rbt.
Note that for the purpose of flush_rbt, we only need to order blocks
on the oldest_modification. The other two fields are used to uniquely
identify the blocks.
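The ordering requirement spelled out here translates directly into a lexicographic comparison: only the first component matters for flush ordering, and the other two break ties so that distinct blocks never compare equal. A hypothetical equivalent in plain C++ (the real buf_flush_block_cmp() returns an int for the red-black tree):

#include <cstdint>
#include <tuple>

/* Hypothetical, reduced page descriptor for illustration only. */
struct dirty_page
{
  std::uint64_t oldest_modification; // primary flush-order key
  std::uint32_t space;               // tie-breakers: unique page identity
  std::uint32_t offset;
};

/* key = <oldest_modification, space, offset>, as the comment describes. */
static bool flush_order_less(const dirty_page& a, const dirty_page& b)
{
  return std::tie(a.oldest_modification, a.space, a.offset)
       < std::tie(b.oldest_modification, b.space, b.offset);
}

int main()
{
  dirty_page p1{10, 0, 5}, p2{10, 0, 7};
  return flush_order_less(p1, p2) ? 0 : 1; // p1 orders before p2
}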
@@ -313,7 +313,7 @@ buf_flush_block_cmp(
ut_ad(b1 != NULL);
ut_ad(b2 != NULL);
- ut_ad(mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(mutex_own(&buf_pool.flush_list_mutex));
ut_ad(b1->in_flush_list);
ut_ad(b2->in_flush_list);
@@ -339,12 +339,12 @@ void
buf_flush_init_flush_rbt(void)
/*==========================*/
{
- mutex_enter(&buf_pool->flush_list_mutex);
- ut_ad(buf_pool->flush_rbt == NULL);
+ mutex_enter(&buf_pool.flush_list_mutex);
+ ut_ad(buf_pool.flush_rbt == NULL);
/* Create red black tree for speedy insertions in flush list. */
- buf_pool->flush_rbt = rbt_create(
+ buf_pool.flush_rbt = rbt_create(
sizeof(buf_page_t*), buf_flush_block_cmp);
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
}
/********************************************************************//**
@@ -353,13 +353,13 @@ void
buf_flush_free_flush_rbt(void)
/*==========================*/
{
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_flush_validate_low();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- rbt_free(buf_pool->flush_rbt);
- buf_pool->flush_rbt = NULL;
- mutex_exit(&buf_pool->flush_list_mutex);
+ rbt_free(buf_pool.flush_rbt);
+ buf_pool.flush_rbt = NULL;
+ mutex_exit(&buf_pool.flush_list_mutex);
}
/** Insert a modified block into the flush list.
@@ -367,12 +367,12 @@ buf_flush_free_flush_rbt(void)
@param[in] lsn oldest modification */
void buf_flush_insert_into_flush_list(buf_block_t* block, lsn_t lsn)
{
- ut_ad(!mutex_own(&buf_pool->mutex));
+ ut_ad(!mutex_own(&buf_pool.mutex));
ut_ad(log_flush_order_mutex_own());
ut_ad(buf_page_mutex_own(block));
ut_ad(lsn);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
ut_ad(!block->page.in_flush_list);
ut_d(block->page.in_flush_list = TRUE);
ut_ad(!block->page.oldest_modification);
@@ -382,9 +382,9 @@ void buf_flush_insert_into_flush_list(buf_block_t* block, lsn_t lsn)
block->physical_size());
incr_flush_list_size_in_bytes(block);
- if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
+ if (UNIV_LIKELY_NULL(buf_pool.flush_rbt)) {
ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
- /* The field in_LRU_list is protected by buf_pool->mutex, which
+ /* The field in_LRU_list is protected by buf_pool.mutex, which
we are not holding. However, while a block is in the flush
	list, it is dirty and cannot be discarded, neither from the
	page_hash nor from the LRU list. At most, the uncompressed
@@ -402,18 +402,18 @@ void buf_flush_insert_into_flush_list(buf_block_t* block, lsn_t lsn)
if (buf_page_t* prev_b =
buf_flush_insert_in_flush_rbt(&block->page)) {
- UT_LIST_INSERT_AFTER(buf_pool->flush_list, prev_b, &block->page);
+ UT_LIST_INSERT_AFTER(buf_pool.flush_list, prev_b, &block->page);
goto func_exit;
}
}
- UT_LIST_ADD_FIRST(buf_pool->flush_list, &block->page);
+ UT_LIST_ADD_FIRST(buf_pool.flush_list, &block->page);
func_exit:
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_flush_validate_skip();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
}
/********************************************************************//**
@@ -426,7 +426,7 @@ buf_flush_ready_for_replace(
buf_page_t* bpage) /*!< in: buffer control block, must be
buf_page_in_file(bpage) and in the LRU list */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(bpage->in_LRU_list);
@@ -453,7 +453,7 @@ buf_flush_ready_for_flush(
buf_page_in_file(bpage) */
buf_flush_t flush_type)/*!< in: type of flush */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_a(buf_page_in_file(bpage));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(flush_type < BUF_FLUSH_N_TYPES);
@@ -489,18 +489,18 @@ void buf_flush_remove(buf_page_t* bpage)
INNODB_EXTEND_TIMEOUT_INTERVAL,
"Flush and remove page with tablespace id %u"
", flush list length " ULINTPF,
- bpage->space, UT_LIST_GET_LEN(buf_pool->flush_list));
+ bpage->space, UT_LIST_GET_LEN(buf_pool.flush_list));
}
#endif
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(bpage->in_flush_list);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
/* Important that we adjust the hazard pointer before removing
the bpage from flush list. */
- buf_pool->flush_hp.adjust(bpage);
+ buf_pool.flush_hp.adjust(bpage);
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_POOL_WATCH:
@@ -514,18 +514,18 @@ void buf_flush_remove(buf_page_t* bpage)
return;
case BUF_BLOCK_ZIP_DIRTY:
buf_page_set_state(bpage, BUF_BLOCK_ZIP_PAGE);
- UT_LIST_REMOVE(buf_pool->flush_list, bpage);
+ UT_LIST_REMOVE(buf_pool.flush_list, bpage);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_LRU_insert_zip_clean(bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
break;
case BUF_BLOCK_FILE_PAGE:
- UT_LIST_REMOVE(buf_pool->flush_list, bpage);
+ UT_LIST_REMOVE(buf_pool.flush_list, bpage);
break;
}
/* If the flush_rbt is active then delete from there as well. */
- if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
+ if (UNIV_LIKELY_NULL(buf_pool.flush_rbt)) {
buf_flush_delete_from_flush_rbt(bpage);
}
@@ -533,7 +533,7 @@ void buf_flush_remove(buf_page_t* bpage)
because we assert on in_flush_list in comparison function. */
ut_d(bpage->in_flush_list = FALSE);
- buf_pool->stat.flush_list_bytes -= bpage->physical_size();
+ buf_pool.stat.flush_list_bytes -= bpage->physical_size();
bpage->oldest_modification = 0;
@@ -541,7 +541,7 @@ void buf_flush_remove(buf_page_t* bpage)
buf_flush_validate_skip();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
}
/*******************************************************************//**
@@ -564,10 +564,10 @@ buf_flush_relocate_on_flush_list(
buf_page_t* prev;
buf_page_t* prev_b = NULL;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
/* FIXME: At this point we have both buf_pool and flush_list
mutexes. Theoretically removal of a block from flush list is
@@ -581,38 +581,38 @@ buf_flush_relocate_on_flush_list(
/* If recovery is active we must swap the control blocks in
the flush_rbt as well. */
- if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
+ if (UNIV_LIKELY_NULL(buf_pool.flush_rbt)) {
buf_flush_delete_from_flush_rbt(bpage);
prev_b = buf_flush_insert_in_flush_rbt(dpage);
}
/* Important that we adjust the hazard pointer before removing
the bpage from the flush list. */
- buf_pool->flush_hp.adjust(bpage);
+ buf_pool.flush_hp.adjust(bpage);
/* Must be done after we have removed it from the flush_rbt
because we assert on in_flush_list in comparison function. */
ut_d(bpage->in_flush_list = FALSE);
prev = UT_LIST_GET_PREV(list, bpage);
- UT_LIST_REMOVE(buf_pool->flush_list, bpage);
+ UT_LIST_REMOVE(buf_pool.flush_list, bpage);
if (prev) {
ut_ad(prev->in_flush_list);
- UT_LIST_INSERT_AFTER( buf_pool->flush_list, prev, dpage);
+ UT_LIST_INSERT_AFTER( buf_pool.flush_list, prev, dpage);
} else {
- UT_LIST_ADD_FIRST(buf_pool->flush_list, dpage);
+ UT_LIST_ADD_FIRST(buf_pool.flush_list, dpage);
}
/* Just an extra check. Previous in flush_list
should be the same control block as in flush_rbt. */
- ut_a(buf_pool->flush_rbt == NULL || prev_b == prev);
+ ut_a(buf_pool.flush_rbt == NULL || prev_b == prev);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_flush_validate_low();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
}
/** Update the flush system data structures when a write is completed.
@@ -625,17 +625,17 @@ void buf_flush_write_complete(buf_page_t* bpage, bool dblwr)
buf_flush_remove(bpage);
const buf_flush_t flush_type = buf_page_get_flush_type(bpage);
- buf_pool->n_flush[flush_type]--;
- ut_ad(buf_pool->n_flush[flush_type] != ULINT_MAX);
+ buf_pool.n_flush[flush_type]--;
+ ut_ad(buf_pool.n_flush[flush_type] != ULINT_MAX);
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
- if (buf_pool->n_flush[flush_type] == 0
- && buf_pool->init_flush[flush_type] == FALSE) {
+ if (buf_pool.n_flush[flush_type] == 0
+ && buf_pool.init_flush[flush_type] == FALSE) {
/* The running flush batch has ended */
- os_event_set(buf_pool->no_flush[flush_type]);
+ os_event_set(buf_pool.no_flush[flush_type]);
}
if (dblwr) {
@@ -958,7 +958,7 @@ static byte* buf_page_encrypt(fil_space_t* space, buf_page_t* bpage, byte* s)
ut_ad(!bpage->zip_size() || !page_compressed);
/* Find free slot from temporary memory array */
- buf_tmp_buffer_t *slot= buf_pool->io_buf.reserve();
+ buf_tmp_buffer_t *slot= buf_pool.io_buf_reserve();
ut_a(slot);
slot->allocate();
slot->out_buf= NULL;
@@ -1042,9 +1042,9 @@ static void buf_flush_freed_page(buf_page_t *bpage, fil_space_t *space)
const bool uncompressed= buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE;
BPageMutex *block_mutex= uncompressed
? &reinterpret_cast<buf_block_t*>(bpage)->mutex
- : &buf_pool->zip_mutex;
+ : &buf_pool.zip_mutex;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
mutex_enter(block_mutex);
buf_page_set_io_fix(bpage, BUF_IO_NONE);
@@ -1055,8 +1055,8 @@ static void buf_flush_freed_page(buf_page_t *bpage, fil_space_t *space)
rw_lock_sx_unlock_gen(&reinterpret_cast<buf_block_t*>(bpage)->lock,
BUF_IO_WRITE);
- buf_pool->stat.n_pages_written++;
- mutex_exit(&buf_pool->mutex);
+ buf_pool.stat.n_pages_written++;
+ mutex_exit(&buf_pool.mutex);
const page_id_t page_id(bpage->id);
const auto zip_size= bpage->zip_size();
mutex_exit(block_mutex);
@@ -1110,13 +1110,13 @@ buf_flush_write_block_low(
ut_ad(buf_page_in_file(bpage));
- /* We are not holding buf_pool->mutex or block_mutex here.
+ /* We are not holding buf_pool.mutex or block_mutex here.
Nevertheless, it is safe to access bpage, because it is
io_fixed and oldest_modification != 0. Thus, it cannot be
relocated in the buffer pool or removed from flush_list or
LRU_list. */
- ut_ad(!mutex_own(&buf_pool->mutex));
- ut_ad(!mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(!mutex_own(&buf_pool.mutex));
+ ut_ad(!mutex_own(&buf_pool.flush_list_mutex));
ut_ad(!buf_page_get_mutex(bpage)->is_owned());
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_WRITE);
ut_ad(bpage->oldest_modification != 0);
@@ -1249,7 +1249,7 @@ bool buf_flush_page(buf_page_t* bpage, buf_flush_t flush_type, bool sync)
BPageMutex* block_mutex;
ut_ad(flush_type < BUF_FLUSH_N_TYPES);
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
ut_ad(!sync || flush_type == BUF_FLUSH_SINGLE_PAGE);
@@ -1260,7 +1260,7 @@ bool buf_flush_page(buf_page_t* bpage, buf_flush_t flush_type, bool sync)
bool is_uncompressed = (buf_page_get_state(bpage)
== BUF_BLOCK_FILE_PAGE);
- ut_ad(is_uncompressed == (block_mutex != &buf_pool->zip_mutex));
+ ut_ad(is_uncompressed == (block_mutex != &buf_pool.zip_mutex));
rw_lock_t* rw_lock;
bool no_fix_count = bpage->buf_fix_count == 0;
@@ -1290,16 +1290,16 @@ bool buf_flush_page(buf_page_t* bpage, buf_flush_t flush_type, bool sync)
buf_page_set_flush_type(bpage, flush_type);
- if (buf_pool->n_flush[flush_type] == 0) {
- os_event_reset(buf_pool->no_flush[flush_type]);
+ if (buf_pool.n_flush[flush_type] == 0) {
+ os_event_reset(buf_pool.no_flush[flush_type]);
}
- ++buf_pool->n_flush[flush_type];
- ut_ad(buf_pool->n_flush[flush_type] != 0);
+ ++buf_pool.n_flush[flush_type];
+ ut_ad(buf_pool.n_flush[flush_type] != 0);
mutex_exit(block_mutex);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (flush_type == BUF_FLUSH_LIST
&& is_uncompressed
@@ -1335,7 +1335,7 @@ buf_flush_batch() and buf_flush_page().
@return whether the page was flushed and the mutex released */
bool buf_flush_page_try(buf_block_t* block)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(buf_page_mutex_own(block));
@@ -1364,13 +1364,13 @@ buf_flush_check_neighbor(
ut_ad(flush_type == BUF_FLUSH_LRU
|| flush_type == BUF_FLUSH_LIST);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
bpage = buf_page_hash_get(page_id);
if (!bpage) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(false);
}
@@ -1389,7 +1389,7 @@ buf_flush_check_neighbor(
}
mutex_exit(block_mutex);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(ret);
}
@@ -1419,7 +1419,7 @@ buf_flush_try_neighbors(
return 0;
}
- if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN
+ if (UT_LIST_GET_LEN(buf_pool.LRU) < BUF_LRU_OLD_MIN_LEN
|| !srv_flush_neighbors || !space->is_rotational()) {
/* If there is little space or neighbor flushing is
not enabled then just flush the victim. */
@@ -1433,8 +1433,8 @@ buf_flush_try_neighbors(
ulint buf_flush_area;
buf_flush_area = ut_min(
- buf_pool->read_ahead_area,
- buf_pool->curr_size / 16);
+ buf_pool.read_ahead_area,
+ buf_pool.curr_size / 16);
low = (page_id.page_no() / buf_flush_area) * buf_flush_area;
high = (page_id.page_no() / buf_flush_area + 1) * buf_flush_area;
@@ -1503,12 +1503,12 @@ buf_flush_try_neighbors(
const page_id_t cur_page_id(page_id.space(), i);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
bpage = buf_page_hash_get(cur_page_id);
if (bpage == NULL) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
continue;
}
@@ -1536,7 +1536,7 @@ buf_flush_try_neighbors(
++count;
} else {
mutex_exit(block_mutex);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
continue;
@@ -1544,7 +1544,7 @@ buf_flush_try_neighbors(
mutex_exit(block_mutex);
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
space->release_for_io();
@@ -1579,7 +1579,7 @@ buf_flush_page_and_try_neighbors(
ulint n_to_flush,
ulint* count)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
bool flushed;
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
@@ -1592,20 +1592,20 @@ buf_flush_page_and_try_neighbors(
const page_id_t page_id = bpage->id;
mutex_exit(block_mutex);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Try to flush also all the neighbors */
*count += buf_flush_try_neighbors(
page_id, flush_type, *count, n_to_flush);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
flushed = true;
} else {
mutex_exit(block_mutex);
flushed = false;
}
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
return(flushed);
}
@@ -1624,35 +1624,35 @@ static ulint buf_free_from_unzip_LRU_list_batch(ulint max)
{
ulint scanned = 0;
ulint count = 0;
- ulint free_len = UT_LIST_GET_LEN(buf_pool->free);
- ulint lru_len = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
+ ulint free_len = UT_LIST_GET_LEN(buf_pool.free);
+ ulint lru_len = UT_LIST_GET_LEN(buf_pool.unzip_LRU);
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
- buf_block_t* block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
+ buf_block_t* block = UT_LIST_GET_LAST(buf_pool.unzip_LRU);
while (block != NULL
&& count < max
&& free_len < srv_LRU_scan_depth
- && lru_len > UT_LIST_GET_LEN(buf_pool->LRU) / 10) {
+ && lru_len > UT_LIST_GET_LEN(buf_pool.LRU) / 10) {
++scanned;
if (buf_LRU_free_page(&block->page, false)) {
- /* Block was freed. buf_pool->mutex potentially
+ /* Block was freed. buf_pool.mutex potentially
released and reacquired */
++count;
- block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
+ block = UT_LIST_GET_LAST(buf_pool.unzip_LRU);
} else {
block = UT_LIST_GET_PREV(unzip_LRU, block);
}
- free_len = UT_LIST_GET_LEN(buf_pool->free);
- lru_len = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
+ free_len = UT_LIST_GET_LEN(buf_pool.free);
+ lru_len = UT_LIST_GET_LEN(buf_pool.unzip_LRU);
}
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
if (scanned) {
MONITOR_INC_VALUE_CUMULATIVE(
@@ -1675,29 +1675,29 @@ static void buf_flush_LRU_list_batch(ulint max, flush_counters_t* n)
{
buf_page_t* bpage;
ulint scanned = 0;
- ulint free_len = UT_LIST_GET_LEN(buf_pool->free);
- ulint lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
+ ulint free_len = UT_LIST_GET_LEN(buf_pool.free);
+ ulint lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
ulint withdraw_depth = 0;
n->flushed = 0;
n->evicted = 0;
n->unzip_LRU_evicted = 0;
- ut_ad(mutex_own(&buf_pool->mutex));
- if (buf_pool->curr_size < buf_pool->old_size
- && buf_pool->withdraw_target > 0) {
- withdraw_depth = buf_pool->withdraw_target
- - UT_LIST_GET_LEN(buf_pool->withdraw);
+ ut_ad(mutex_own(&buf_pool.mutex));
+ if (buf_pool.curr_size < buf_pool.old_size
+ && buf_pool.withdraw_target > 0) {
+ withdraw_depth = buf_pool.withdraw_target
+ - UT_LIST_GET_LEN(buf_pool.withdraw);
}
- for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+ for (bpage = UT_LIST_GET_LAST(buf_pool.LRU);
bpage != NULL && n->flushed + n->evicted < max
&& free_len < srv_LRU_scan_depth + withdraw_depth
&& lru_len > BUF_LRU_MIN_LEN;
++scanned,
- bpage = buf_pool->lru_hp.get()) {
+ bpage = buf_pool.lru_hp.get()) {
buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
- buf_pool->lru_hp.set(prev);
+ buf_pool.lru_hp.set(prev);
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
@@ -1720,25 +1720,25 @@ static void buf_flush_LRU_list_batch(ulint max, flush_counters_t* n)
} else {
/* Can't evict or dispatch this block. Go to
previous. */
- ut_ad(buf_pool->lru_hp.is_hp(prev));
+ ut_ad(buf_pool.lru_hp.is_hp(prev));
mutex_exit(block_mutex);
}
ut_ad(!mutex_own(block_mutex));
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
- free_len = UT_LIST_GET_LEN(buf_pool->free);
- lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
+ free_len = UT_LIST_GET_LEN(buf_pool.free);
+ lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
}
- buf_pool->lru_hp.set(NULL);
+ buf_pool.lru_hp.set(NULL);
/* We keep track of all flushes happening as part of LRU
flush. When estimating the desired rate at which flush_list
should be flushed, we factor in this value. */
buf_lru_flush_page_count += n->flushed;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
if (n->evicted) {
MONITOR_INC_VALUE_CUMULATIVE(
@@ -1793,22 +1793,22 @@ static ulint buf_do_flush_list_batch(ulint min_n, lsn_t lsn_limit)
ulint count = 0;
ulint scanned = 0;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
/* Start from the end of the list looking for a suitable
block to be flushed. */
- mutex_enter(&buf_pool->flush_list_mutex);
- ulint len = UT_LIST_GET_LEN(buf_pool->flush_list);
+ mutex_enter(&buf_pool.flush_list_mutex);
+ ulint len = UT_LIST_GET_LEN(buf_pool.flush_list);
/* In order not to degenerate this scan to O(n*n) we attempt
	to preserve the pointer to the previous block in the flush list. To do
so we declare it a hazard pointer. Any thread working on the
flush list must check the hazard pointer and if it is removing
the same block then it must reset it. */
- for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
+ for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool.flush_list);
count < min_n && bpage != NULL && len > 0
&& bpage->oldest_modification < lsn_limit;
- bpage = buf_pool->flush_hp.get(),
+ bpage = buf_pool.flush_hp.get(),
++scanned) {
buf_page_t* prev;
@@ -1817,8 +1817,8 @@ static ulint buf_do_flush_list_batch(ulint min_n, lsn_t lsn_limit)
ut_ad(bpage->in_flush_list);
prev = UT_LIST_GET_PREV(list, bpage);
- buf_pool->flush_hp.set(prev);
- mutex_exit(&buf_pool->flush_list_mutex);
+ buf_pool.flush_hp.set(prev);
+ mutex_exit(&buf_pool.flush_list_mutex);
#ifdef UNIV_DEBUG
bool flushed =
@@ -1826,15 +1826,15 @@ static ulint buf_do_flush_list_batch(ulint min_n, lsn_t lsn_limit)
buf_flush_page_and_try_neighbors(
bpage, BUF_FLUSH_LIST, min_n, &count);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
- ut_ad(flushed || buf_pool->flush_hp.is_hp(prev));
+ ut_ad(flushed || buf_pool.flush_hp.is_hp(prev));
--len;
}
- buf_pool->flush_hp.set(NULL);
- mutex_exit(&buf_pool->flush_list_mutex);
+ buf_pool.flush_hp.set(NULL);
+ mutex_exit(&buf_pool.flush_list_mutex);
if (scanned) {
MONITOR_INC_VALUE_CUMULATIVE(
@@ -1852,7 +1852,7 @@ static ulint buf_do_flush_list_batch(ulint min_n, lsn_t lsn_limit)
count);
}
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
return(count);
}
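The buf_do_flush_list_batch() hunks above rely on buf_pool.flush_hp: before flush_list_mutex is released to flush a page, the scan publishes the predecessor as a hazard pointer, and whoever removes a page from the flush list must adjust that pointer (see buf_flush_remove() and buf_flush_relocate_on_flush_list() earlier in this diff), so the scan can resume without restarting. A reduced, hypothetical sketch of the idiom, not the HazardPointer class from the InnoDB headers:

#include <cassert>
#include <list>

struct page { int no; };

/* Toy hazard pointer: protects the scan's notion of "previous element". */
struct hazard_ptr
{
  const page* hp= nullptr;
  void set(const page* p) { hp= p; }
  const page* get() const { return hp; }
  /* Called by a remover while it still holds the list mutex: if the page
     being removed is the published hazard pointer, move the pointer to
     that page's predecessor so the paused scan does not follow a stale
     link when it resumes. */
  void adjust(std::list<page>& l, std::list<page>::iterator victim)
  {
    if (hp == &*victim)
      hp= victim == l.begin() ? nullptr : &*std::prev(victim);
  }
};

int main()
{
  std::list<page> flush_list{{1}, {2}, {3}};
  hazard_ptr flush_hp;

  auto it= std::prev(flush_list.end());   // scan from the tail, like InnoDB
  flush_hp.set(&*std::prev(it));          // publish the predecessor ({2})

  /* Another thread removes {2} while the scanner has dropped the mutex. */
  auto victim= std::next(flush_list.begin());
  flush_hp.adjust(flush_list, victim);
  flush_list.erase(victim);

  assert(flush_hp.get()->no == 1);        // scan resumes at {1}, not at freed {2}
}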
@@ -1883,7 +1883,7 @@ buf_flush_batch(
ut_ad(flush_type == BUF_FLUSH_LRU
|| !sync_check_iterate(dict_sync_check()));
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
/* Note: The buffer pool mutex is released and reacquired within
the flush functions. */
@@ -1899,7 +1899,7 @@ buf_flush_batch(
ut_error;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
DBUG_LOG("ib_buf", "flush " << flush_type << " completed");
}
@@ -1931,23 +1931,23 @@ bool buf_flush_start(buf_flush_t flush_type)
{
ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- if (buf_pool->n_flush[flush_type] > 0
- || buf_pool->init_flush[flush_type] == TRUE) {
+ if (buf_pool.n_flush[flush_type] > 0
+ || buf_pool.init_flush[flush_type] == TRUE) {
/* There is already a flush batch of the same type running */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(false);
}
- buf_pool->init_flush[flush_type] = TRUE;
+ buf_pool.init_flush[flush_type] = TRUE;
- os_event_reset(buf_pool->no_flush[flush_type]);
+ os_event_reset(buf_pool.no_flush[flush_type]);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(true);
}
@@ -1956,20 +1956,20 @@ bool buf_flush_start(buf_flush_t flush_type)
@param[in] flush_type BUF_FLUSH_LRU or BUF_FLUSH_LIST */
void buf_flush_end(buf_flush_t flush_type)
{
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- buf_pool->init_flush[flush_type] = FALSE;
+ buf_pool.init_flush[flush_type] = FALSE;
- buf_pool->try_LRU_scan = TRUE;
+ buf_pool.try_LRU_scan = TRUE;
- if (buf_pool->n_flush[flush_type] == 0) {
+ if (buf_pool.n_flush[flush_type] == 0) {
/* The running flush batch has ended */
- os_event_set(buf_pool->no_flush[flush_type]);
+ os_event_set(buf_pool.no_flush[flush_type]);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (!srv_read_only_mode) {
buf_dblwr_flush_buffered_writes();
@@ -1982,7 +1982,7 @@ void buf_flush_wait_batch_end(buf_flush_t type)
{
ut_ad(type == BUF_FLUSH_LRU || type == BUF_FLUSH_LIST);
thd_wait_begin(NULL, THD_WAIT_DISKIO);
- os_event_wait(buf_pool->no_flush[type]);
+ os_event_wait(buf_pool.no_flush[type]);
thd_wait_end(NULL);
}
@@ -2029,7 +2029,7 @@ void buf_flush_wait_flushed(lsn_t new_oldest)
	blocks, because anyway we need fsync to make a checkpoint.
So, we don't need to wait for the batch end here. */
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
buf_page_t* bpage;
@@ -2037,7 +2037,7 @@ void buf_flush_wait_flushed(lsn_t new_oldest)
list. We would only need to write out temporary pages if the
page is about to be evicted from the buffer pool, and the page
	contents are still needed (the page has not been freed). */
- for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
+ for (bpage = UT_LIST_GET_LAST(buf_pool.flush_list);
bpage && fsp_is_system_temporary(bpage->id.space());
bpage = UT_LIST_GET_PREV(list, bpage)) {
ut_ad(bpage->in_flush_list);
@@ -2045,7 +2045,7 @@ void buf_flush_wait_flushed(lsn_t new_oldest)
lsn_t oldest = bpage ? bpage->oldest_modification : 0;
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
if (oldest == 0 || oldest >= new_oldest) {
break;
@@ -2101,17 +2101,17 @@ bool buf_flush_single_page_from_LRU()
buf_page_t* bpage;
ibool freed;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- for (bpage = buf_pool->single_scan_itr.start(), scanned = 0,
+ for (bpage = buf_pool.single_scan_itr.start(), scanned = 0,
freed = false;
bpage != NULL;
- ++scanned, bpage = buf_pool->single_scan_itr.get()) {
+ ++scanned, bpage = buf_pool.single_scan_itr.get()) {
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
- buf_pool->single_scan_itr.set(prev);
+ buf_pool.single_scan_itr.set(prev);
BPageMutex* block_mutex;
block_mutex = buf_page_get_mutex(bpage);
@@ -2124,7 +2124,7 @@ bool buf_flush_single_page_from_LRU()
mutex_exit(block_mutex);
if (buf_LRU_free_page(bpage, true)) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
freed = true;
break;
}
@@ -2156,7 +2156,7 @@ bool buf_flush_single_page_from_LRU()
if (!freed) {
/* Can't find a single flushable page. */
ut_ad(!bpage);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
if (scanned) {
@@ -2167,7 +2167,7 @@ bool buf_flush_single_page_from_LRU()
scanned);
}
- ut_ad(!mutex_own(&buf_pool->mutex));
+ ut_ad(!mutex_own(&buf_pool.mutex));
return(freed);
}
@@ -2187,16 +2187,16 @@ static ulint buf_flush_LRU_list()
/* srv_LRU_scan_depth can be arbitrarily large value.
We cap it with current LRU size. */
- mutex_enter(&buf_pool->mutex);
- scan_depth = UT_LIST_GET_LEN(buf_pool->LRU);
- if (buf_pool->curr_size < buf_pool->old_size
- && buf_pool->withdraw_target > 0) {
- withdraw_depth = buf_pool->withdraw_target
- - UT_LIST_GET_LEN(buf_pool->withdraw);
+ mutex_enter(&buf_pool.mutex);
+ scan_depth = UT_LIST_GET_LEN(buf_pool.LRU);
+ if (buf_pool.curr_size < buf_pool.old_size
+ && buf_pool.withdraw_target > 0) {
+ withdraw_depth = buf_pool.withdraw_target
+ - UT_LIST_GET_LEN(buf_pool.withdraw);
} else {
withdraw_depth = 0;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (withdraw_depth > srv_LRU_scan_depth) {
scan_depth = ut_min(withdraw_depth, scan_depth);
} else {
@@ -2215,10 +2215,10 @@ static ulint buf_flush_LRU_list()
/** Wait for any possible LRU flushes to complete. */
void buf_flush_wait_LRU_batch_end()
{
- mutex_enter(&buf_pool->mutex);
- bool wait = buf_pool->n_flush[BUF_FLUSH_LRU]
- || buf_pool->init_flush[BUF_FLUSH_LRU];
- mutex_exit(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
+ bool wait = buf_pool.n_flush[BUF_FLUSH_LRU]
+ || buf_pool.init_flush[BUF_FLUSH_LRU];
+ mutex_exit(&buf_pool.mutex);
if (wait) {
buf_flush_wait_batch_end(BUF_FLUSH_LRU);
}
@@ -2232,7 +2232,7 @@ static
ulint
af_get_pct_for_dirty()
{
- const ulint dirty = UT_LIST_GET_LEN(buf_pool->flush_list);
+ const ulint dirty = UT_LIST_GET_LEN(buf_pool.flush_list);
if (!dirty) {
/* No pages modified */
return 0;
@@ -2242,8 +2242,8 @@ af_get_pct_for_dirty()
pool (including the flush_list) was emptied while we are
looking at it) */
double dirty_pct = 100 * static_cast<double>(dirty)
- / static_cast<double>(1 + UT_LIST_GET_LEN(buf_pool->LRU)
- + UT_LIST_GET_LEN(buf_pool->free));
+ / static_cast<double>(1 + UT_LIST_GET_LEN(buf_pool.LRU)
+ + UT_LIST_GET_LEN(buf_pool.free));
ut_a(srv_max_dirty_pages_pct_lwm
<= srv_max_buf_pool_modified_pct);
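af_get_pct_for_dirty() still computes the dirty fraction the same way, only against the static pool: dirty_pct = 100 * flush_list_len / (1 + LRU_len + free_len), where the 1 guards against division by zero if the pool was emptied. A worked example with invented list lengths:

#include <cstdio>

int main()
{
  /* Invented lengths; only the formula follows the code above. */
  double dirty= 1500;    // UT_LIST_GET_LEN(buf_pool.flush_list)
  double lru= 7000;      // UT_LIST_GET_LEN(buf_pool.LRU)
  double free_len= 500;  // UT_LIST_GET_LEN(buf_pool.free)

  double dirty_pct= 100 * dirty / (1 + lru + free_len);
  std::printf("dirty_pct = %.1f%%\n", dirty_pct);  // ~20.0%
}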
@@ -2457,8 +2457,8 @@ page_cleaner_flush_pages_recommendation(ulint last_pages_in)
+ lsn_avg_rate * buf_flush_lsn_scan_factor;
ulint pages_for_lsn = 0;
- mutex_enter(&buf_pool->flush_list_mutex);
- for (buf_page_t* b = UT_LIST_GET_LAST(buf_pool->flush_list);
+ mutex_enter(&buf_pool.flush_list_mutex);
+ for (buf_page_t* b = UT_LIST_GET_LAST(buf_pool.flush_list);
b != NULL;
b = UT_LIST_GET_PREV(list, b)) {
if (b->oldest_modification > target_lsn) {
@@ -2466,7 +2466,7 @@ page_cleaner_flush_pages_recommendation(ulint last_pages_in)
}
++pages_for_lsn;
}
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
mutex_enter(&page_cleaner.mutex);
ut_ad(page_cleaner.slot.state == PAGE_CLEANER_STATE_NONE);
@@ -2834,7 +2834,7 @@ static os_thread_ret_t DECLARE_THREAD(buf_flush_page_cleaner)(void*)
/* The page_cleaner skips sleep if the server is
idle and there are no pending IOs in the buffer pool
and there is work to do. */
- if (!n_flushed || !buf_pool->n_pend_reads
+ if (!n_flushed || !buf_pool.n_pend_reads
|| srv_check_activity(last_activity)) {
ret_sleep = pc_sleep_if_needed(
@@ -3088,7 +3088,7 @@ static os_thread_ret_t DECLARE_THREAD(buf_flush_page_cleaner)(void*)
/* Some sanity checks */
ut_ad(!srv_any_background_activity());
ut_ad(srv_shutdown_state == SRV_SHUTDOWN_FLUSH_PHASE);
- ut_a(UT_LIST_GET_LEN(buf_pool->flush_list) == 0);
+ ut_a(UT_LIST_GET_LEN(buf_pool.flush_list) == 0);
/* We have lived our life. Time to die. */
@@ -3171,34 +3171,34 @@ static void buf_flush_validate_low()
buf_page_t* bpage;
const ib_rbt_node_t* rnode = NULL;
- ut_ad(mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(mutex_own(&buf_pool.flush_list_mutex));
- ut_list_validate(buf_pool->flush_list, Check());
+ ut_list_validate(buf_pool.flush_list, Check());
- bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
+ bpage = UT_LIST_GET_FIRST(buf_pool.flush_list);
/* If we are in recovery mode i.e.: flush_rbt != NULL
then each block in the flush_list must also be present
in the flush_rbt. */
- if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
- rnode = rbt_first(buf_pool->flush_rbt);
+ if (UNIV_LIKELY_NULL(buf_pool.flush_rbt)) {
+ rnode = rbt_first(buf_pool.flush_rbt);
}
while (bpage != NULL) {
const lsn_t om = bpage->oldest_modification;
ut_ad(bpage->in_flush_list);
- /* A page in buf_pool->flush_list can be in
+ /* A page in buf_pool.flush_list can be in
BUF_BLOCK_REMOVE_HASH state. This happens when a page
is in the middle of being relocated. In that case the
original descriptor can have this state and still be
in the flush list waiting to acquire the
- buf_pool->flush_list_mutex to complete the relocation. */
+ buf_pool.flush_list_mutex to complete the relocation. */
ut_a(buf_page_in_file(bpage)
|| buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH);
ut_a(om > 0);
- if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
+ if (UNIV_LIKELY_NULL(buf_pool.flush_rbt)) {
buf_page_t** prpage;
ut_a(rnode != NULL);
@@ -3206,7 +3206,7 @@ static void buf_flush_validate_low()
ut_a(*prpage != NULL);
ut_a(*prpage == bpage);
- rnode = rbt_next(buf_pool->flush_rbt, rnode);
+ rnode = rbt_next(buf_pool.flush_rbt, rnode);
}
bpage = UT_LIST_GET_NEXT(list, bpage);
@@ -3222,9 +3222,9 @@ static void buf_flush_validate_low()
/** Validate the flush list. */
void buf_flush_validate()
{
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
buf_flush_validate_low();
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
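Across buf0flu.cc the batch bookkeeping keeps its shape: buf_flush_start() refuses to start if n_flush[type] or init_flush[type] indicates a running batch, each page dispatched for write bumps n_flush, buf_flush_write_complete() decrements it, and the no_flush[type] event is signalled once both say the batch is over, which is what buf_flush_wait_batch_end() waits on. A condensed, hypothetical sketch of that protocol, using std::condition_variable in place of os_event:

#include <condition_variable>
#include <mutex>

/* Hypothetical stand-in for n_flush/init_flush/no_flush; not InnoDB code. */
struct flush_batch
{
  std::mutex m;                  // plays the role of buf_pool.mutex
  std::condition_variable done;  // plays the role of no_flush[type]
  unsigned n_flush= 0;           // page writes still in progress
  bool init_flush= false;        // a batch is being dispatched

  bool start()
  {
    std::lock_guard<std::mutex> g(m);
    if (n_flush || init_flush)
      return false;              // a batch of this type is already running
    init_flush= true;
    return true;
  }
  void page_dispatched() { std::lock_guard<std::mutex> g(m); ++n_flush; }
  void write_completed()
  {
    std::lock_guard<std::mutex> g(m);
    if (!--n_flush && !init_flush)
      done.notify_all();         // the running batch has ended
  }
  void end()
  {
    std::lock_guard<std::mutex> g(m);
    init_flush= false;
    if (!n_flush)
      done.notify_all();
  }
  void wait_batch_end()
  {
    std::unique_lock<std::mutex> lk(m);
    done.wait(lk, [this] { return !n_flush && !init_flush; });
  }
};

int main()
{
  flush_batch b;
  if (b.start())
  {
    b.page_dispatched();
    b.write_completed();         // in InnoDB this happens on write completion
    b.end();
  }
  b.wait_batch_end();            // returns immediately: nothing in progress
}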
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index 1c24076db15..2ee0c02d964 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -46,7 +46,7 @@ Created 11/5/1995 Heikki Tuuri
#include "srv0mon.h"
/** The number of blocks from the LRU_old pointer onward, including
-the block pointed to, must be buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
+the block pointed to, must be buf_pool.LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
of the whole LRU list length, except that the tolerance defined below
is allowed. Note that the tolerance must be small enough such that for
even the BUF_LRU_OLD_MIN_LEN long LRU list, the LRU_old pointer is not
@@ -63,7 +63,7 @@ static const ulint BUF_LRU_OLD_TOLERANCE = 20;
/** When dropping the search hash index entries before deleting an ibd
file, we build a local array of pages belonging to that tablespace
in the buffer pool. Following is the size of that array.
-We also release buf_pool->mutex after scanning this many pages of the
+We also release buf_pool.mutex after scanning this many pages of the
flush_list when dropping a table. This is to ensure that other threads
are not blocked for an extended period of time when using very large
buffer pools. */
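The batching this comment describes is unchanged; only the mutex it names is. Pages are collected into a bounded local array, and once it fills up the pool mutex is released while the batch of adaptive hash index entries is dropped, then re-acquired to continue the scan. A stripped-down sketch of that release-and-reacquire pattern (hypothetical helpers, not the buf0lru.cc functions):

#include <mutex>
#include <vector>

static std::mutex pool_mutex;   // stands in for buf_pool.mutex

/* Hypothetical batch worker; in buf0lru.cc this drops AHI entries. */
static void drop_batch(const std::vector<unsigned>& pages) { (void) pages; }

static void drop_in_batches(const std::vector<unsigned>& all, size_t batch_size)
{
  std::vector<unsigned> batch;
  std::unique_lock<std::mutex> lk(pool_mutex);
  for (unsigned page_no : all)
  {
    batch.push_back(page_no);
    if (batch.size() < batch_size)
      continue;
    /* Array full: release the pool mutex so other threads are not
       blocked while the slow part runs, then re-acquire and resume. */
    lk.unlock();
    drop_batch(batch);
    batch.clear();
    lk.lock();
  }
  lk.unlock();
  drop_batch(batch);            // whatever is left over
}

int main()
{
  drop_in_batches({1, 2, 3, 4, 5}, 2);
}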
@@ -130,7 +130,7 @@ Takes a block out of the LRU list and page hash table.
If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
the object will be freed.
-The caller must hold buf_pool->mutex, the buf_page_get_mutex() mutex
+The caller must hold buf_pool.mutex, the buf_page_get_mutex() mutex
and the appropriate hash_lock. This function will release the
buf_page_get_mutex() and the hash_lock.
@@ -162,35 +162,35 @@ buf_LRU_block_free_hashed_page(
static inline void incr_LRU_size_in_bytes(const buf_page_t* bpage)
{
/* FIXME: use atomics, not mutex */
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
- buf_pool->stat.LRU_bytes += bpage->physical_size();
+ buf_pool.stat.LRU_bytes += bpage->physical_size();
- ut_ad(buf_pool->stat.LRU_bytes <= buf_pool->curr_pool_size);
+ ut_ad(buf_pool.stat.LRU_bytes <= buf_pool.curr_pool_size);
}
/** @return whether the unzip_LRU list should be used for evicting a victim
instead of the general LRU list */
bool buf_LRU_evict_from_unzip_LRU()
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
/* If the unzip_LRU list is empty, we can only use the LRU. */
- if (UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0) {
+ if (UT_LIST_GET_LEN(buf_pool.unzip_LRU) == 0) {
return false;
}
/* If unzip_LRU is at most 10% of the size of the LRU list,
then use the LRU. This slack allows us to keep hot
decompressed pages in the buffer pool. */
- if (UT_LIST_GET_LEN(buf_pool->unzip_LRU)
- <= UT_LIST_GET_LEN(buf_pool->LRU) / 10) {
+ if (UT_LIST_GET_LEN(buf_pool.unzip_LRU)
+ <= UT_LIST_GET_LEN(buf_pool.LRU) / 10) {
return false;
}
/* If eviction hasn't started yet, we assume by default
that a workload is disk bound. */
- if (buf_pool->freed_page_clock == 0) {
+ if (buf_pool.freed_page_clock == 0) {
return true;
}
@@ -246,10 +246,10 @@ buf_LRU_drop_page_hash_for_tablespace(ulint id)
ulint num_entries = 0;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
scan_again:
- for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+ for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool.LRU);
bpage != NULL;
/* No op */) {
@@ -274,7 +274,7 @@ next_page:
/* This debug check uses a dirty read that could
theoretically cause false positives while
- buf_pool_clear_hash_index() is executing.
+ buf_pool.clear_hash_index() is executing.
(Other conflicting access paths to the adaptive hash
index should not be possible, because when a
tablespace is being discarded or dropped, there must
@@ -303,15 +303,15 @@ next_page:
goto next_page;
}
- /* Array full. We release the buf_pool->mutex to obey
+ /* Array full. We release the buf_pool.mutex to obey
the latching order. */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
num_entries = 0;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
/* Note that we released the buf_pool mutex above
after reading the prev_bpage during processing of a
@@ -336,7 +336,7 @@ next_page:
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Drop any remaining batch of search hashed pages. */
buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
@@ -383,14 +383,14 @@ static void buf_flush_yield(buf_page_t* bpage)
block mutexes. */
buf_page_set_sticky(bpage);
- /* Now it is safe to release the buf_pool->mutex. */
- mutex_exit(&buf_pool->mutex);
+ /* Now it is safe to release the buf_pool.mutex. */
+ mutex_exit(&buf_pool.mutex);
mutex_exit(block_mutex);
/* Try and force a context switch. */
os_thread_yield();
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
mutex_enter(block_mutex);
/* "Unfix" the block now that we have both the
@@ -412,7 +412,7 @@ buf_flush_try_yield(
ulint processed) /*!< in: number of pages processed */
{
/* Every BUF_LRU_DROP_SEARCH_SIZE iterations in the
- loop we release buf_pool->mutex to let other threads
+ loop we release buf_pool.mutex to let other threads
do their job but only if the block is not IO fixed. This
ensures that the block stays in its position in the
flush_list. */
@@ -421,14 +421,14 @@ buf_flush_try_yield(
&& processed >= BUF_LRU_DROP_SEARCH_SIZE
&& buf_page_get_io_fix(bpage) == BUF_IO_NONE) {
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
/* Release the buffer pool and block mutex
to give the other threads a go. */
buf_flush_yield(bpage);
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
/* Should not have been removed from the flush
list during the yield. However, this check is
@@ -449,12 +449,12 @@ buf_flush_try_yield(
@return true if page was removed. */
static bool buf_flush_or_remove_page(buf_page_t *bpage, bool flush)
{
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(mutex_own(&buf_pool->flush_list_mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(mutex_own(&buf_pool.flush_list_mutex));
/* bpage->space and bpage->io_fix are protected by
- buf_pool->mutex and block_mutex. It is safe to check
- them while holding buf_pool->mutex only. */
+ buf_pool.mutex and block_mutex. It is safe to check
+ them while holding buf_pool.mutex only. */
if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
@@ -474,9 +474,9 @@ static bool buf_flush_or_remove_page(buf_page_t *bpage, bool flush)
latching order. We are however guaranteed that the page
will stay in the flush_list and won't be relocated because
buf_flush_remove() and buf_flush_relocate_on_flush_list()
- need buf_pool->mutex as well. */
+ need buf_pool.mutex as well. */
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
mutex_enter(block_mutex);
@@ -498,7 +498,7 @@ static bool buf_flush_or_remove_page(buf_page_t *bpage, bool flush)
bpage, BUF_FLUSH_SINGLE_PAGE, false);
if (processed) {
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
} else {
mutex_exit(block_mutex);
}
@@ -506,10 +506,10 @@ static bool buf_flush_or_remove_page(buf_page_t *bpage, bool flush)
mutex_exit(block_mutex);
}
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
ut_ad(!mutex_own(block_mutex));
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
return(processed);
}
@@ -528,11 +528,11 @@ static bool buf_flush_or_remove_pages(ulint id, bool flush, ulint first)
buf_page_t* bpage;
ulint processed = 0;
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
rescan:
bool all_freed = true;
- for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
+ for (bpage = UT_LIST_GET_LAST(buf_pool.flush_list);
bpage != NULL;
bpage = prev) {
@@ -591,7 +591,7 @@ rescan:
#endif /* BTR_CUR_HASH_ADAPT */
}
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
return(all_freed);
}
@@ -604,21 +604,21 @@ as they age and move towards the tail of the LRU list.
@param[in] first first page to be flushed or evicted */
static void buf_flush_dirty_pages(ulint id, bool flush, ulint first)
{
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
while (!buf_flush_or_remove_pages(id, flush, first))
{
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
ut_d(buf_flush_validate());
os_thread_sleep(2000);
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
}
#ifdef UNIV_DEBUG
if (!first)
{
- mutex_enter(&buf_pool->flush_list_mutex);
+ mutex_enter(&buf_pool.flush_list_mutex);
- for (buf_page_t *bpage= UT_LIST_GET_FIRST(buf_pool->flush_list); bpage;
+ for (buf_page_t *bpage= UT_LIST_GET_FIRST(buf_pool.flush_list); bpage;
bpage= UT_LIST_GET_NEXT(list, bpage))
{
ut_ad(buf_page_in_file(bpage));
@@ -627,11 +627,11 @@ static void buf_flush_dirty_pages(ulint id, bool flush, ulint first)
ut_ad(id != bpage->id.space());
}
- mutex_exit(&buf_pool->flush_list_mutex);
+ mutex_exit(&buf_pool.flush_list_mutex);
}
#endif
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/** Empty the flush list for all pages belonging to a tablespace.
@@ -654,13 +654,13 @@ void buf_LRU_flush_or_remove_pages(ulint id, bool flush, ulint first)
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/********************************************************************//**
-Insert a compressed block into buf_pool->zip_clean in the LRU order. */
+Insert a compressed block into buf_pool.zip_clean in the LRU order. */
void
buf_LRU_insert_zip_clean(
/*=====================*/
buf_page_t* bpage) /*!< in: pointer to the block in question */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
/* Find the first successor of bpage in the LRU list
@@ -677,9 +677,9 @@ buf_LRU_insert_zip_clean(
}
if (b != NULL) {
- UT_LIST_INSERT_AFTER(buf_pool->zip_clean, b, bpage);
+ UT_LIST_INSERT_AFTER(buf_pool.zip_clean, b, bpage);
} else {
- UT_LIST_ADD_FIRST(buf_pool->zip_clean, bpage);
+ UT_LIST_ADD_FIRST(buf_pool.zip_clean, bpage);
}
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -691,7 +691,7 @@ LRU list. The compressed page is preserved, and it need not be clean.
@return true if freed */
static bool buf_LRU_free_from_unzip_LRU_list(bool scan_all)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
if (!buf_LRU_evict_from_unzip_LRU()) {
return(false);
@@ -700,7 +700,7 @@ static bool buf_LRU_free_from_unzip_LRU_list(bool scan_all)
ulint scanned = 0;
bool freed = false;
- for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
+ for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool.unzip_LRU);
block != NULL
&& !freed
&& (scan_all || scanned < srv_LRU_scan_depth);
@@ -736,21 +736,21 @@ static bool buf_LRU_free_from_unzip_LRU_list(bool scan_all)
@return whether a page was freed */
static bool buf_LRU_free_from_common_LRU_list(bool scan_all)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ulint scanned = 0;
bool freed = false;
- for (buf_page_t* bpage = buf_pool->lru_scan_itr.start();
+ for (buf_page_t* bpage = buf_pool.lru_scan_itr.start();
bpage != NULL
&& !freed
&& (scan_all || scanned < BUF_LRU_SEARCH_SCAN_THRESHOLD);
- ++scanned, bpage = buf_pool->lru_scan_itr.get()) {
+ ++scanned, bpage = buf_pool.lru_scan_itr.get()) {
buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
BPageMutex* mutex = buf_page_get_mutex(bpage);
- buf_pool->lru_scan_itr.set(prev);
+ buf_pool.lru_scan_itr.set(prev);
mutex_enter(mutex);
@@ -770,10 +770,10 @@ static bool buf_LRU_free_from_common_LRU_list(bool scan_all)
/* Keep track of pages that are evicted without
ever being accessed. This gives us a measure of
the effectiveness of readahead */
- ++buf_pool->stat.n_ra_pages_evicted;
+ ++buf_pool.stat.n_ra_pages_evicted;
}
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(!mutex_own(mutex));
}
@@ -794,7 +794,7 @@ static bool buf_LRU_free_from_common_LRU_list(bool scan_all)
@return true if found and freed */
bool buf_LRU_scan_and_free_block(bool scan_all)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
return(buf_LRU_free_from_unzip_LRU_list(scan_all)
|| buf_LRU_free_from_common_LRU_list(scan_all));
@@ -804,21 +804,21 @@ bool buf_LRU_scan_and_free_block(bool scan_all)
bool buf_LRU_buf_pool_running_out()
{
return !recv_recovery_is_on()
- && UT_LIST_GET_LEN(buf_pool->free)
- + UT_LIST_GET_LEN(buf_pool->LRU)
- < ut_min(buf_pool->curr_size, buf_pool->old_size) / 4;
+ && UT_LIST_GET_LEN(buf_pool.free)
+ + UT_LIST_GET_LEN(buf_pool.LRU)
+ < ut_min(buf_pool.curr_size, buf_pool.old_size) / 4;
}
-/** @return a buffer block from the buf_pool->free list
+/** @return a buffer block from the buf_pool.free list
@retval NULL if the free list is empty */
buf_block_t* buf_LRU_get_free_only()
{
buf_block_t* block;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
block = reinterpret_cast<buf_block_t*>(
- UT_LIST_GET_FIRST(buf_pool->free));
+ UT_LIST_GET_FIRST(buf_pool.free));
while (block != NULL) {
@@ -827,12 +827,12 @@ buf_block_t* buf_LRU_get_free_only()
ut_ad(!block->page.in_flush_list);
ut_ad(!block->page.in_LRU_list);
ut_a(!buf_page_in_file(&block->page));
- UT_LIST_REMOVE(buf_pool->free, &block->page);
+ UT_LIST_REMOVE(buf_pool.free, &block->page);
- if (buf_pool->curr_size >= buf_pool->old_size
- || UT_LIST_GET_LEN(buf_pool->withdraw)
- >= buf_pool->withdraw_target
- || !buf_block_will_be_withdrawn(block)) {
+ if (buf_pool.curr_size >= buf_pool.old_size
+ || UT_LIST_GET_LEN(buf_pool.withdraw)
+ >= buf_pool.withdraw_target
+ || !buf_pool.will_be_withdrawn(block->page)) {
/* found valid free block */
buf_page_mutex_enter(block);
/* No adaptive hash index entries may point to
@@ -848,12 +848,12 @@ buf_block_t* buf_LRU_get_free_only()
/* This should be withdrawn */
UT_LIST_ADD_LAST(
- buf_pool->withdraw,
+ buf_pool.withdraw,
&block->page);
ut_d(block->in_withdraw_list = TRUE);
block = reinterpret_cast<buf_block_t*>(
- UT_LIST_GET_FIRST(buf_pool->free));
+ UT_LIST_GET_FIRST(buf_pool.free));
}
return(block);
@@ -866,12 +866,12 @@ function will either assert or issue a warning and switch on the
status monitor. */
static void buf_LRU_check_size_of_non_data_objects()
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
if (!recv_recovery_is_on()
- && buf_pool->curr_size == buf_pool->old_size
- && UT_LIST_GET_LEN(buf_pool->free)
- + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 20) {
+ && buf_pool.curr_size == buf_pool.old_size
+ && UT_LIST_GET_LEN(buf_pool.free)
+ + UT_LIST_GET_LEN(buf_pool.LRU) < buf_pool.curr_size / 20) {
ib::fatal() << "Over 95 percent of the buffer pool is"
" occupied by lock heaps"
@@ -881,13 +881,13 @@ static void buf_LRU_check_size_of_non_data_objects()
" Check that your transactions do not set too many"
" row locks, or review if"
" innodb_buffer_pool_size="
- << (buf_pool->curr_size >> (20U - srv_page_size_shift))
+ << (buf_pool.curr_size >> (20U - srv_page_size_shift))
<< "M could be bigger.";
} else if (!recv_recovery_is_on()
- && buf_pool->curr_size == buf_pool->old_size
- && (UT_LIST_GET_LEN(buf_pool->free)
- + UT_LIST_GET_LEN(buf_pool->LRU))
- < buf_pool->curr_size / 3) {
+ && buf_pool.curr_size == buf_pool.old_size
+ && (UT_LIST_GET_LEN(buf_pool.free)
+ + UT_LIST_GET_LEN(buf_pool.LRU))
+ < buf_pool.curr_size / 3) {
if (!buf_lru_switched_on_innodb_mon) {
@@ -903,7 +903,7 @@ static void buf_LRU_check_size_of_non_data_objects()
" Check that your transactions do not"
" set too many row locks."
" innodb_buffer_pool_size="
- << (buf_pool->curr_size >>
+ << (buf_pool.curr_size >>
(20U - srv_page_size_shift)) << "M."
" Starting the InnoDB Monitor to print"
" diagnostics.";
@@ -934,7 +934,7 @@ the free list. Even when we flush a page or find a page in LRU scan
we put it on the free list to be used.
* iteration 0:
* get a block from free list, success:done
- * if buf_pool->try_LRU_scan is set
+ * if buf_pool.try_LRU_scan is set
* scan LRU up to srv_LRU_scan_depth to find a clean block
* the above will put the block on free list
* success:retry the free list
@@ -944,7 +944,7 @@ we put it to free list to be used.
* iteration 1:
* same as iteration 0 except:
* scan whole LRU list
- * scan LRU list even if buf_pool->try_LRU_scan is not set
+ * scan LRU list even if buf_pool.try_LRU_scan is not set
* iteration > 1:
* same as iteration 1 but sleep 10ms
@return the free control block, in state BUF_BLOCK_READY_FOR_USE */
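For illustration, the retry strategy described in the comment above boils down to the following standalone C++ sketch. It is not InnoDB code: the pool layout, the dirty flags and the bail-out after max_iter iterations are hypothetical simplifications (the real function loops until it succeeds, relying on the page cleaner to refill the free list).

#include <algorithm>
#include <chrono>
#include <cstddef>
#include <deque>
#include <optional>
#include <thread>
#include <utility>

struct pool_sketch {
  std::deque<std::pair<int, bool>> lru;   // {block, is_dirty}; evict from the tail
  std::deque<int> free_list;
  bool try_lru_scan = true;               // stands in for buf_pool.try_LRU_scan

  // Scan up to `depth` entries (or all of them) from the LRU tail and
  // move one clean block to the free list.
  bool evict_one_clean(bool scan_all, std::size_t depth)
  {
    std::size_t limit = scan_all ? lru.size() : std::min(depth, lru.size());
    for (std::size_t i = 0; i < limit; i++) {
      auto [block, dirty] = lru[lru.size() - 1 - i];
      if (!dirty) {
        lru.erase(lru.end() - 1 - static_cast<std::ptrdiff_t>(i));
        free_list.push_back(block);
        return true;
      }
    }
    return false;
  }

  std::optional<int> get_free_block(std::size_t lru_scan_depth = 100,
                                    unsigned max_iter = 10)
  {
    for (unsigned n_iterations = 0; n_iterations < max_iter; n_iterations++) {
      if (!free_list.empty()) {           // free list hit: done
        int block = free_list.front();
        free_list.pop_front();
        return block;
      }
      if (try_lru_scan || n_iterations > 0) {
        // iteration 0: bounded scan; iteration >= 1: scan the whole LRU list
        if (!evict_one_clean(n_iterations > 0, lru_scan_depth))
          try_lru_scan = false;           // give up until a flush batch resets it
      }
      if (n_iterations > 1)               // iteration > 1: back off before retrying
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
    return std::nullopt;                  // the sketch gives up; the real code retries forever
  }
};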
@@ -957,7 +957,7 @@ buf_block_t* buf_LRU_get_free_block()
MONITOR_INC(MONITOR_LRU_GET_FREE_SEARCH);
loop:
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
buf_LRU_check_size_of_non_data_objects();
@@ -970,7 +970,7 @@ loop:
block = buf_LRU_get_free_only();
if (block != NULL) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
memset(&block->page.zip, 0, sizeof block->page.zip);
block->skip_flush_check = false;
return(block);
@@ -978,7 +978,7 @@ loop:
MONITOR_INC( MONITOR_LRU_GET_FREE_LOOPS );
freed = false;
- if (buf_pool->try_LRU_scan || n_iterations > 0) {
+ if (buf_pool.try_LRU_scan || n_iterations > 0) {
/* If no block was in the free list, search from the
end of the LRU list and try to free a block there.
If we are doing it for the first time we'll scan only
@@ -991,7 +991,7 @@ loop:
in scanning the LRU list. This flag is set to
TRUE again when we flush a batch from this
buffer pool. */
- buf_pool->try_LRU_scan = FALSE;
+ buf_pool.try_LRU_scan = FALSE;
/* Also tell the page_cleaner thread that
there is work for it to do. */
@@ -1003,7 +1003,7 @@ loop:
not_found:
#endif
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (freed) {
goto loop;
@@ -1073,34 +1073,34 @@ static void buf_LRU_old_adjust_len()
ulint old_len;
ulint new_len;
- ut_a(buf_pool->LRU_old);
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_ad(buf_pool->LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
- ut_ad(buf_pool->LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
+ ut_a(buf_pool.LRU_old);
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_ad(buf_pool.LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
+ ut_ad(buf_pool.LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
compile_time_assert(BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN
> BUF_LRU_OLD_RATIO_DIV
* (BUF_LRU_OLD_TOLERANCE + 5));
compile_time_assert(BUF_LRU_NON_OLD_MIN_LEN < BUF_LRU_OLD_MIN_LEN);
#ifdef UNIV_LRU_DEBUG
- /* buf_pool->LRU_old must be the first item in the LRU list
+ /* buf_pool.LRU_old must be the first item in the LRU list
whose "old" flag is set. */
- ut_a(buf_pool->LRU_old->old);
- ut_a(!UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)
- || !UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)->old);
- ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)
- || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
+ ut_a(buf_pool.LRU_old->old);
+ ut_a(!UT_LIST_GET_PREV(LRU, buf_pool.LRU_old)
+ || !UT_LIST_GET_PREV(LRU, buf_pool.LRU_old)->old);
+ ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool.LRU_old)
+ || UT_LIST_GET_NEXT(LRU, buf_pool.LRU_old)->old);
#endif /* UNIV_LRU_DEBUG */
- old_len = buf_pool->LRU_old_len;
- new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
- * buf_pool->LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
- UT_LIST_GET_LEN(buf_pool->LRU)
+ old_len = buf_pool.LRU_old_len;
+ new_len = ut_min(UT_LIST_GET_LEN(buf_pool.LRU)
+ * buf_pool.LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
+ UT_LIST_GET_LEN(buf_pool.LRU)
- (BUF_LRU_OLD_TOLERANCE
+ BUF_LRU_NON_OLD_MIN_LEN));
for (;;) {
- buf_page_t* LRU_old = buf_pool->LRU_old;
+ buf_page_t* LRU_old = buf_pool.LRU_old;
ut_a(LRU_old);
ut_ad(LRU_old->in_LRU_list);
@@ -1112,18 +1112,18 @@ static void buf_LRU_old_adjust_len()
if (old_len + BUF_LRU_OLD_TOLERANCE < new_len) {
- buf_pool->LRU_old = LRU_old = UT_LIST_GET_PREV(
+ buf_pool.LRU_old = LRU_old = UT_LIST_GET_PREV(
LRU, LRU_old);
#ifdef UNIV_LRU_DEBUG
ut_a(!LRU_old->old);
#endif /* UNIV_LRU_DEBUG */
- old_len = ++buf_pool->LRU_old_len;
+ old_len = ++buf_pool.LRU_old_len;
buf_page_set_old(LRU_old, TRUE);
} else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) {
- buf_pool->LRU_old = UT_LIST_GET_NEXT(LRU, LRU_old);
- old_len = --buf_pool->LRU_old_len;
+ buf_pool.LRU_old = UT_LIST_GET_NEXT(LRU, LRU_old);
+ old_len = --buf_pool.LRU_old_len;
buf_page_set_old(LRU_old, FALSE);
} else {
return;
@@ -1135,14 +1135,14 @@ static void buf_LRU_old_adjust_len()
called when the LRU list grows to BUF_LRU_OLD_MIN_LEN length. */
static void buf_LRU_old_init()
{
- ut_ad(mutex_own(&buf_pool->mutex));
- ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);
+ ut_ad(mutex_own(&buf_pool.mutex));
+ ut_a(UT_LIST_GET_LEN(buf_pool.LRU) == BUF_LRU_OLD_MIN_LEN);
/* We first initialize all blocks in the LRU list as old and then use
the adjust function to move the LRU_old pointer to the right
position */
- for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+ for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool.LRU);
bpage != NULL;
bpage = UT_LIST_GET_PREV(LRU, bpage)) {
@@ -1154,8 +1154,8 @@ static void buf_LRU_old_init()
bpage->old = TRUE;
}
- buf_pool->LRU_old = UT_LIST_GET_FIRST(buf_pool->LRU);
- buf_pool->LRU_old_len = UT_LIST_GET_LEN(buf_pool->LRU);
+ buf_pool.LRU_old = UT_LIST_GET_FIRST(buf_pool.LRU);
+ buf_pool.LRU_old_len = UT_LIST_GET_LEN(buf_pool.LRU);
buf_LRU_old_adjust_len();
}
@@ -1165,7 +1165,7 @@ static void buf_LRU_old_init()
static void buf_unzip_LRU_remove_block_if_needed(buf_page_t* bpage)
{
ut_ad(buf_page_in_file(bpage));
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
if (buf_page_belongs_to_unzip_LRU(bpage)) {
buf_block_t* block = reinterpret_cast<buf_block_t*>(bpage);
@@ -1173,7 +1173,7 @@ static void buf_unzip_LRU_remove_block_if_needed(buf_page_t* bpage)
ut_ad(block->in_unzip_LRU_list);
ut_d(block->in_unzip_LRU_list = FALSE);
- UT_LIST_REMOVE(buf_pool->unzip_LRU, block);
+ UT_LIST_REMOVE(buf_pool.unzip_LRU, block);
}
}
@@ -1181,16 +1181,16 @@ static void buf_unzip_LRU_remove_block_if_needed(buf_page_t* bpage)
@param[in] bpage buffer page descriptor */
void buf_LRU_adjust_hp(const buf_page_t* bpage)
{
- buf_pool->lru_hp.adjust(bpage);
- buf_pool->lru_scan_itr.adjust(bpage);
- buf_pool->single_scan_itr.adjust(bpage);
+ buf_pool.lru_hp.adjust(bpage);
+ buf_pool.lru_scan_itr.adjust(bpage);
+ buf_pool.single_scan_itr.adjust(bpage);
}
/** Removes a block from the LRU list.
@param[in] bpage control block */
static inline void buf_LRU_remove_block(buf_page_t* bpage)
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_a(buf_page_in_file(bpage));
@@ -1203,12 +1203,12 @@ static inline void buf_LRU_remove_block(buf_page_t* bpage)
/* If the LRU_old pointer is defined and points to just this block,
move it backward one step */
- if (bpage == buf_pool->LRU_old) {
+ if (bpage == buf_pool.LRU_old) {
/* Below: the previous block is guaranteed to exist,
because the LRU_old pointer is only allowed to differ
by BUF_LRU_OLD_TOLERANCE from strict
- buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
+ buf_pool.LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
list length. */
buf_page_t* prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
@@ -1216,25 +1216,25 @@ static inline void buf_LRU_remove_block(buf_page_t* bpage)
#ifdef UNIV_LRU_DEBUG
ut_a(!prev_bpage->old);
#endif /* UNIV_LRU_DEBUG */
- buf_pool->LRU_old = prev_bpage;
+ buf_pool.LRU_old = prev_bpage;
buf_page_set_old(prev_bpage, TRUE);
- buf_pool->LRU_old_len++;
+ buf_pool.LRU_old_len++;
}
/* Remove the block from the LRU list */
- UT_LIST_REMOVE(buf_pool->LRU, bpage);
+ UT_LIST_REMOVE(buf_pool.LRU, bpage);
ut_d(bpage->in_LRU_list = FALSE);
- buf_pool->stat.LRU_bytes -= bpage->physical_size();
+ buf_pool.stat.LRU_bytes -= bpage->physical_size();
buf_unzip_LRU_remove_block_if_needed(bpage);
/* If the LRU list is so short that LRU_old is not defined,
clear the "old" flags and return */
- if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {
+ if (UT_LIST_GET_LEN(buf_pool.LRU) < BUF_LRU_OLD_MIN_LEN) {
- for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+ for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
bpage != NULL;
bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
@@ -1243,18 +1243,18 @@ static inline void buf_LRU_remove_block(buf_page_t* bpage)
bpage->old = FALSE;
}
- buf_pool->LRU_old = NULL;
- buf_pool->LRU_old_len = 0;
+ buf_pool.LRU_old = NULL;
+ buf_pool.LRU_old_len = 0;
return;
}
- ut_ad(buf_pool->LRU_old);
+ ut_ad(buf_pool.LRU_old);
/* Update the LRU_old_len field if necessary */
if (buf_page_is_old(bpage)) {
- buf_pool->LRU_old_len--;
+ buf_pool.LRU_old_len--;
}
/* Adjust the length of the old block list if necessary */
@@ -1270,15 +1270,15 @@ buf_unzip_LRU_add_block(
ibool old) /*!< in: TRUE if should be put to the end
of the list, else put to the start */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
ut_ad(!block->in_unzip_LRU_list);
ut_d(block->in_unzip_LRU_list = TRUE);
if (old) {
- UT_LIST_ADD_LAST(buf_pool->unzip_LRU, block);
+ UT_LIST_ADD_LAST(buf_pool.unzip_LRU, block);
} else {
- UT_LIST_ADD_FIRST(buf_pool->unzip_LRU, block);
+ UT_LIST_ADD_FIRST(buf_pool.unzip_LRU, block);
}
}
@@ -1296,53 +1296,53 @@ buf_LRU_add_block_low(
LRU list is very short, the block is added to
the start, regardless of this parameter */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_a(buf_page_in_file(bpage));
ut_ad(!bpage->in_LRU_list);
- if (!old || (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN)) {
+ if (!old || (UT_LIST_GET_LEN(buf_pool.LRU) < BUF_LRU_OLD_MIN_LEN)) {
- UT_LIST_ADD_FIRST(buf_pool->LRU, bpage);
+ UT_LIST_ADD_FIRST(buf_pool.LRU, bpage);
- bpage->freed_page_clock = buf_pool->freed_page_clock
+ bpage->freed_page_clock = buf_pool.freed_page_clock
& ((1U << 31) - 1);
} else {
#ifdef UNIV_LRU_DEBUG
- /* buf_pool->LRU_old must be the first item in the LRU list
+ /* buf_pool.LRU_old must be the first item in the LRU list
whose "old" flag is set. */
- ut_a(buf_pool->LRU_old->old);
- ut_a(!UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)
- || !UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)->old);
- ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)
- || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
+ ut_a(buf_pool.LRU_old->old);
+ ut_a(!UT_LIST_GET_PREV(LRU, buf_pool.LRU_old)
+ || !UT_LIST_GET_PREV(LRU, buf_pool.LRU_old)->old);
+ ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool.LRU_old)
+ || UT_LIST_GET_NEXT(LRU, buf_pool.LRU_old)->old);
#endif /* UNIV_LRU_DEBUG */
- UT_LIST_INSERT_AFTER(buf_pool->LRU, buf_pool->LRU_old,
+ UT_LIST_INSERT_AFTER(buf_pool.LRU, buf_pool.LRU_old,
bpage);
- buf_pool->LRU_old_len++;
+ buf_pool.LRU_old_len++;
}
ut_d(bpage->in_LRU_list = TRUE);
incr_LRU_size_in_bytes(bpage);
- if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {
+ if (UT_LIST_GET_LEN(buf_pool.LRU) > BUF_LRU_OLD_MIN_LEN) {
- ut_ad(buf_pool->LRU_old);
+ ut_ad(buf_pool.LRU_old);
/* Adjust the length of the old block list if necessary */
buf_page_set_old(bpage, old);
buf_LRU_old_adjust_len();
- } else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {
+ } else if (UT_LIST_GET_LEN(buf_pool.LRU) == BUF_LRU_OLD_MIN_LEN) {
/* The LRU list is now long enough for LRU_old to become
defined: init it */
buf_LRU_old_init();
} else {
- buf_page_set_old(bpage, buf_pool->LRU_old != NULL);
+ buf_page_set_old(bpage, buf_pool.LRU_old != NULL);
}
/* If this is a zipped block with decompressed frame as well
@@ -1376,10 +1376,10 @@ buf_LRU_make_block_young(
/*=====================*/
buf_page_t* bpage) /*!< in: control block */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
if (bpage->old) {
- buf_pool->stat.n_pages_made_young++;
+ buf_pool.stat.n_pages_made_young++;
}
buf_LRU_remove_block(bpage);
@@ -1391,10 +1391,10 @@ Try to free a block. If bpage is a descriptor of a compressed-only
page, the descriptor object will be freed as well.
NOTE: If this function returns true, it will temporarily
-release buf_pool->mutex. Furthermore, the page frame will no longer be
+release buf_pool.mutex. Furthermore, the page frame will no longer be
accessible via bpage.
-The caller must hold buf_pool->mutex and must not hold any
+The caller must hold buf_pool.mutex and must not hold any
buf_page_get_mutex() when calling this function.
@return true if freed, false otherwise. */
bool
@@ -1408,7 +1408,7 @@ buf_LRU_free_page(
rw_lock_t* hash_lock = buf_page_hash_lock_get(bpage->id);
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
@@ -1444,7 +1444,7 @@ func_exit:
new (b) buf_page_t(*bpage);
}
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
@@ -1505,7 +1505,7 @@ func_exit:
ut_ad(b->in_page_hash);
ut_ad(b->in_LRU_list);
- HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
+ HASH_INSERT(buf_page_t, hash, buf_pool.page_hash,
b->id.fold(), b);
/* Insert b where bpage was in the LRU list. */
@@ -1515,23 +1515,23 @@ func_exit:
ut_ad(prev_b->in_LRU_list);
ut_ad(buf_page_in_file(prev_b));
- UT_LIST_INSERT_AFTER(buf_pool->LRU, prev_b, b);
+ UT_LIST_INSERT_AFTER(buf_pool.LRU, prev_b, b);
incr_LRU_size_in_bytes(b);
if (buf_page_is_old(b)) {
- buf_pool->LRU_old_len++;
- if (buf_pool->LRU_old
+ buf_pool.LRU_old_len++;
+ if (buf_pool.LRU_old
== UT_LIST_GET_NEXT(LRU, b)) {
- buf_pool->LRU_old = b;
+ buf_pool.LRU_old = b;
}
}
- lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
+ lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
if (lru_len > BUF_LRU_OLD_MIN_LEN) {
- ut_ad(buf_pool->LRU_old);
+ ut_ad(buf_pool.LRU_old);
/* Adjust the length of the
old block list if necessary */
buf_LRU_old_adjust_len();
@@ -1556,7 +1556,7 @@ func_exit:
buf_LRU_insert_zip_clean(b);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
} else {
- /* Relocate on buf_pool->flush_list. */
+ /* Relocate on buf_pool.flush_list. */
buf_flush_relocate_on_flush_list(bpage, b);
}
@@ -1568,7 +1568,7 @@ func_exit:
/* Prevent buf_page_get_gen() from
decompressing the block while we release
- buf_pool->mutex and block_mutex. */
+ buf_pool.mutex and block_mutex. */
block_mutex = buf_page_get_mutex(b);
mutex_enter(block_mutex);
@@ -1580,7 +1580,7 @@ func_exit:
rw_lock_x_unlock(hash_lock);
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Remove possible adaptive hash index on the page.
The page was declared uninitialized by
@@ -1600,7 +1600,7 @@ func_exit:
checksum while not holding any mutex. The
block is already half-freed
(BUF_BLOCK_REMOVE_HASH) and removed from
- buf_pool->page_hash, thus inaccessible by any
+ buf_pool.page_hash, thus inaccessible by any
other thread. */
ut_ad(b->zip_size());
@@ -1615,7 +1615,7 @@ func_exit:
checksum);
}
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
if (b != NULL) {
mutex_enter(block_mutex);
@@ -1639,7 +1639,7 @@ buf_LRU_block_free_non_file_page(
{
void* data;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_mutex_own(block));
switch (buf_block_get_state(block)) {
@@ -1687,16 +1687,16 @@ buf_LRU_block_free_non_file_page(
page_zip_set_size(&block->page.zip, 0);
}
- if (buf_pool->curr_size < buf_pool->old_size
- && UT_LIST_GET_LEN(buf_pool->withdraw) < buf_pool->withdraw_target
- && buf_block_will_be_withdrawn(block)) {
+ if (buf_pool.curr_size < buf_pool.old_size
+ && UT_LIST_GET_LEN(buf_pool.withdraw) < buf_pool.withdraw_target
+ && buf_pool.will_be_withdrawn(block->page)) {
/* This should be withdrawn */
UT_LIST_ADD_LAST(
- buf_pool->withdraw,
+ buf_pool.withdraw,
&block->page);
ut_d(block->in_withdraw_list = TRUE);
} else {
- UT_LIST_ADD_FIRST(buf_pool->free, &block->page);
+ UT_LIST_ADD_FIRST(buf_pool.free, &block->page);
ut_d(block->page.in_free_list = TRUE);
}
@@ -1708,7 +1708,7 @@ Takes a block out of the LRU list and page hash table.
If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
the object will be freed.
-The caller must hold buf_pool->mutex, the buf_page_get_mutex() mutex
+The caller must hold buf_pool.mutex, the buf_page_get_mutex() mutex
and the appropriate hash_lock. This function will release the
buf_page_get_mutex() and the hash_lock.
@@ -1730,7 +1730,7 @@ buf_LRU_block_remove_hashed(
const buf_page_t* hashed_bpage;
rw_lock_t* hash_lock;
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
hash_lock = buf_page_hash_lock_get(bpage->id);
@@ -1742,7 +1742,7 @@ buf_LRU_block_remove_hashed(
buf_LRU_remove_block(bpage);
- buf_pool->freed_page_clock += 1;
+ buf_pool.freed_page_clock += 1;
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_FILE_PAGE:
@@ -1840,10 +1840,9 @@ buf_LRU_block_remove_hashed(
ut_d(mutex_exit(buf_page_get_mutex(bpage)));
ut_d(rw_lock_x_unlock(hash_lock));
- ut_d(mutex_exit(&buf_pool->mutex));
- ut_d(buf_print());
+ ut_d(mutex_exit(&buf_pool.mutex));
+ ut_d(buf_pool.print());
ut_d(buf_LRU_print());
- ut_d(buf_validate());
ut_d(buf_LRU_validate());
ut_ad(0);
}
@@ -1852,7 +1851,7 @@ buf_LRU_block_remove_hashed(
ut_ad(bpage->in_page_hash);
ut_d(bpage->in_page_hash = FALSE);
- HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, bpage->id.fold(),
+ HASH_DELETE(buf_page_t, hash, buf_pool.page_hash, bpage->id.fold(),
bpage);
switch (buf_page_get_state(bpage)) {
@@ -1864,9 +1863,9 @@ buf_LRU_block_remove_hashed(
ut_a(bpage->zip.ssize);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- UT_LIST_REMOVE(buf_pool->zip_clean, bpage);
+ UT_LIST_REMOVE(buf_pool.zip_clean, bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- mutex_exit(&buf_pool->zip_mutex);
+ mutex_exit(&buf_pool.zip_mutex);
rw_lock_x_unlock(hash_lock);
buf_pool_mutex_exit_forbid();
@@ -1952,11 +1951,11 @@ buf_LRU_block_free_hashed_page(
buf_block_t* block) /*!< in: block, must contain a file page and
be in a state where it can be freed */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
buf_page_mutex_enter(block);
- if (buf_pool->flush_rbt == NULL) {
+ if (buf_pool.flush_rbt == NULL) {
block->page.id
= page_id_t(ULINT32_UNDEFINED, ULINT32_UNDEFINED);
}
@@ -1977,7 +1976,7 @@ void buf_LRU_free_one_page(buf_page_t* bpage, page_id_t old_page_id)
rw_lock_t* hash_lock = buf_page_hash_lock_get(old_page_id);
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
rw_lock_x_lock(hash_lock);
@@ -2000,11 +1999,11 @@ void buf_LRU_free_one_page(buf_page_t* bpage, page_id_t old_page_id)
ut_ad(!mutex_own(block_mutex));
}
-/** Update buf_pool->LRU_old_ratio.
+/** Update buf_pool.LRU_old_ratio.
@param[in] old_pct Reserve this percentage of
the buffer pool for "old" blocks
@param[in] adjust true=adjust the LRU list;
- false=just assign buf_pool->LRU_old_ratio
+ false=just assign buf_pool.LRU_old_ratio
during the initialization of InnoDB
@return updated old_pct */
uint buf_LRU_old_ratio_update(uint old_pct, bool adjust)
@@ -2017,20 +2016,20 @@ uint buf_LRU_old_ratio_update(uint old_pct, bool adjust)
}
if (adjust) {
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- if (ratio != buf_pool->LRU_old_ratio) {
- buf_pool->LRU_old_ratio = ratio;
+ if (ratio != buf_pool.LRU_old_ratio) {
+ buf_pool.LRU_old_ratio = ratio;
- if (UT_LIST_GET_LEN(buf_pool->LRU)
+ if (UT_LIST_GET_LEN(buf_pool.LRU)
>= BUF_LRU_OLD_MIN_LEN) {
buf_LRU_old_adjust_len();
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
} else {
- buf_pool->LRU_old_ratio = ratio;
+ buf_pool.LRU_old_ratio = ratio;
}
/* the reverse of
ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100 */
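As a worked example of the conversion noted above (hedged: it assumes BUF_LRU_OLD_RATIO_DIV is 1024 and uses 37 as the innodb_old_blocks_pct setting; check buf0lru.h for the actual constants):

  // old_pct = 37
  // ratio   = 37 * 1024 / 100  = 378   (integer division)
  // with UT_LIST_GET_LEN(buf_pool.LRU) = 10000, buf_LRU_old_adjust_len() then
  // targets roughly 10000 * 378 / 1024 = 3691 "old" pages, about 37% of the list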
@@ -2046,7 +2045,7 @@ buf_LRU_stat_update()
buf_LRU_stat_t* item;
buf_LRU_stat_t cur_stat;
- if (!buf_pool->freed_page_clock) {
+ if (!buf_pool.freed_page_clock) {
goto func_exit;
}
@@ -2081,17 +2080,17 @@ void buf_LRU_validate()
ulint old_len;
ulint new_len;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
+ if (UT_LIST_GET_LEN(buf_pool.LRU) >= BUF_LRU_OLD_MIN_LEN) {
- ut_a(buf_pool->LRU_old);
- old_len = buf_pool->LRU_old_len;
+ ut_a(buf_pool.LRU_old);
+ old_len = buf_pool.LRU_old_len;
- new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
- * buf_pool->LRU_old_ratio
+ new_len = ut_min(UT_LIST_GET_LEN(buf_pool.LRU)
+ * buf_pool.LRU_old_ratio
/ BUF_LRU_OLD_RATIO_DIV,
- UT_LIST_GET_LEN(buf_pool->LRU)
+ UT_LIST_GET_LEN(buf_pool.LRU)
- (BUF_LRU_OLD_TOLERANCE
+ BUF_LRU_NON_OLD_MIN_LEN));
@@ -2103,7 +2102,7 @@ void buf_LRU_validate()
old_len = 0;
- for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+ for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
bpage != NULL;
bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
@@ -2130,7 +2129,7 @@ void buf_LRU_validate()
= UT_LIST_GET_NEXT(LRU, bpage);
if (!old_len++) {
- ut_a(buf_pool->LRU_old == bpage);
+ ut_a(buf_pool.LRU_old == bpage);
} else {
ut_a(!prev || buf_page_is_old(prev));
}
@@ -2139,11 +2138,11 @@ void buf_LRU_validate()
}
}
- ut_a(buf_pool->LRU_old_len == old_len);
+ ut_a(buf_pool.LRU_old_len == old_len);
CheckInFreeList::validate();
- for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->free);
+ for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool.free);
bpage != NULL;
bpage = UT_LIST_GET_NEXT(list, bpage)) {
@@ -2152,7 +2151,7 @@ void buf_LRU_validate()
CheckUnzipLRUAndLRUList::validate();
- for (buf_block_t* block = UT_LIST_GET_FIRST(buf_pool->unzip_LRU);
+ for (buf_block_t* block = UT_LIST_GET_FIRST(buf_pool.unzip_LRU);
block != NULL;
block = UT_LIST_GET_NEXT(unzip_LRU, block)) {
@@ -2161,7 +2160,7 @@ void buf_LRU_validate()
ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -2169,9 +2168,9 @@ void buf_LRU_validate()
/** Dump the LRU list to stderr. */
void buf_LRU_print()
{
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- for (const buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+ for (const buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
bpage != NULL;
bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
@@ -2224,6 +2223,6 @@ void buf_LRU_print()
mutex_exit(buf_page_get_mutex(bpage));
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc
index 232c7e0e8b2..4dceaf8f524 100644
--- a/storage/innobase/buf/buf0rea.cc
+++ b/storage/innobase/buf/buf0rea.cc
@@ -41,7 +41,7 @@ Created 11/5/1995 Heikki Tuuri
#include "srv0start.h"
#include "srv0srv.h"
-/** If there are buf_pool->curr_size per the number below pending reads, then
+/** If there are buf_pool.curr_size per the number below pending reads, then
read-ahead is not done: this is to prevent flooding the buffer pool with
i/o-fixed buffer blocks */
#define BUF_READ_AHEAD_PEND_LIMIT 2
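In other words, with the limit of 2 defined above, read-ahead is skipped once more than half of buf_pool.curr_size pages are already i/o-fixed by pending reads. A minimal illustrative helper (not InnoDB code) for the guard that both the random and the linear read-ahead paths apply:

inline bool read_ahead_allowed(unsigned long n_pend_reads, unsigned long curr_size)
{
  // BUF_READ_AHEAD_PEND_LIMIT == 2: refuse read-ahead when pending reads
  // exceed curr_size / 2, to avoid flooding the pool with i/o-fixed blocks.
  return n_pend_reads <= curr_size / 2;
}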
@@ -60,7 +60,7 @@ buf_read_page_handle_error(
const page_id_t old_page_id = bpage->id;
/* First unfix and release lock on the bpage */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
mutex_enter(buf_page_get_mutex(bpage));
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_READ);
@@ -79,10 +79,10 @@ buf_read_page_handle_error(
/* remove the block from LRU list */
buf_LRU_free_one_page(bpage, old_page_id);
- ut_ad(buf_pool->n_pend_reads > 0);
- buf_pool->n_pend_reads--;
+ ut_ad(buf_pool.n_pend_reads > 0);
+ buf_pool.n_pend_reads--;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/** Low-level function which reads a page asynchronously from a file to the
@@ -245,7 +245,7 @@ buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf)
}
const ulint buf_read_ahead_random_area
- = buf_pool->read_ahead_area;
+ = buf_pool.read_ahead_area;
low = (page_id.page_no() / buf_read_ahead_random_area)
* buf_read_ahead_random_area;
@@ -282,11 +282,11 @@ buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf)
return(0);
}
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- if (buf_pool->n_pend_reads
- > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
- mutex_exit(&buf_pool->mutex);
+ if (buf_pool.n_pend_reads
+ > buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
+ mutex_exit(&buf_pool.mutex);
return(0);
}
@@ -300,14 +300,14 @@ buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf)
if (buf_page_is_accessed(bpage)
&& buf_page_peek_if_young(bpage)
&& ++recent_blocks
- >= 5 + buf_pool->read_ahead_area / 8) {
- mutex_exit(&buf_pool->mutex);
+ >= 5 + buf_pool.read_ahead_area / 8) {
+ mutex_exit(&buf_pool.mutex);
goto read_ahead;
}
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Do nothing */
return(0);
@@ -357,7 +357,7 @@ read_ahead:
LRU policy decision. */
buf_LRU_stat_inc_io();
- buf_pool->stat.n_ra_pages_read_rnd += count;
+ buf_pool.stat.n_ra_pages_read_rnd += count;
srv_stats.buf_pool_reads.add(count);
return(count);
}
@@ -501,7 +501,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
}
const ulint buf_read_ahead_linear_area
- = buf_pool->read_ahead_area;
+ = buf_pool.read_ahead_area;
low = (page_id.page_no() / buf_read_ahead_linear_area)
* buf_read_ahead_linear_area;
high = (page_id.page_no() / buf_read_ahead_linear_area + 1)
@@ -539,11 +539,11 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
return(0);
}
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- if (buf_pool->n_pend_reads
- > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
- mutex_exit(&buf_pool->mutex);
+ if (buf_pool.n_pend_reads
+ > buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
+ mutex_exit(&buf_pool.mutex);
return(0);
}
@@ -561,7 +561,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
/* How many out of order accessed pages can we ignore
when working out the access pattern for linear readahead */
threshold = ut_min(static_cast<ulint>(64 - srv_read_ahead_threshold),
- buf_pool->read_ahead_area);
+ buf_pool.read_ahead_area);
fail_count = 0;
@@ -592,7 +592,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
if (fail_count > threshold) {
/* Too many failures: return */
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(0);
}
@@ -607,7 +607,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
bpage = buf_page_hash_get(page_id);
if (bpage == NULL) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(0);
}
@@ -633,7 +633,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
pred_offset = fil_page_get_prev(frame);
succ_offset = fil_page_get_next(frame);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if ((page_id.page_no() == low)
&& (succ_offset == page_id.page_no() + 1)) {
@@ -715,7 +715,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
LRU policy decision. */
buf_LRU_stat_inc_io();
- buf_pool->stat.n_ra_pages_read += count;
+ buf_pool.stat.n_ra_pages_read += count;
return(count);
}
@@ -748,7 +748,7 @@ buf_read_recv_pages(
const page_id_t cur_page_id(space_id, page_nos[i]);
for (ulint count = 0, limit = recv_sys.max_blocks() / 2;
- buf_pool->n_pend_reads >= limit; ) {
+ buf_pool.n_pend_reads >= limit; ) {
os_thread_sleep(10000);
@@ -757,7 +757,7 @@ buf_read_recv_pages(
ib::error()
<< "Waited for " << count / 100
<< " seconds for "
- << buf_pool->n_pend_reads
+ << buf_pool.n_pend_reads
<< " pending reads";
}
}
diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc
index 041ceed7801..9d42ed8cca1 100644
--- a/storage/innobase/gis/gis0sea.cc
+++ b/storage/innobase/gis/gis0sea.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2016, 2018, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1275,7 +1275,7 @@ rtr_cur_restore_position(
ut_ad(latch_mode == BTR_CONT_MODIFY_TREE);
- if (!buf_pool_is_obsolete(r_cursor->withdraw_clock)
+ if (!buf_pool.is_obsolete(r_cursor->withdraw_clock)
&& buf_page_optimistic_get(RW_X_LATCH,
r_cursor->block_when_stored,
r_cursor->modify_clock,
diff --git a/storage/innobase/ha/ha0ha.cc b/storage/innobase/ha/ha0ha.cc
index ef4ca0cba16..de271eabc31 100644
--- a/storage/innobase/ha/ha0ha.cc
+++ b/storage/innobase/ha/ha0ha.cc
@@ -72,7 +72,7 @@ ib_create(
if (type == MEM_HEAP_FOR_PAGE_HASH) {
/* We create a hash table protected by rw_locks for
- buf_pool->page_hash. */
+ buf_pool.page_hash. */
hash_create_sync_obj(
table, HASH_TABLE_SYNC_RW_LOCK, id, n_sync_obj);
} else {
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index a2849c18aa7..2107153b48e 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -18243,9 +18243,9 @@ innodb_buffer_pool_evict_uncompressed()
{
bool all_evicted = true;
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
- for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
+ for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool.unzip_LRU);
block != NULL; ) {
buf_block_t* prev_block = UT_LIST_GET_PREV(unzip_LRU, block);
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
@@ -18260,7 +18260,7 @@ innodb_buffer_pool_evict_uncompressed()
block = prev_block;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
return(all_evicted);
}
@@ -21431,10 +21431,10 @@ innodb_buffer_pool_size_validate(
#endif /* UNIV_DEBUG */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
if (srv_buf_pool_old_size != srv_buf_pool_size) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
my_printf_error(ER_WRONG_ARGUMENTS,
"Another buffer pool resize is already in progress.", MYF(0));
return(1);
@@ -21445,13 +21445,13 @@ innodb_buffer_pool_size_validate(
*static_cast<ulonglong*>(save) = requested_buf_pool_size;
if (srv_buf_pool_size == ulint(intbuf)) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* nothing to do */
return(0);
}
if (srv_buf_pool_size == requested_buf_pool_size) {
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_ARGUMENTS,
"innodb_buffer_pool_size must be at least"
@@ -21462,7 +21462,7 @@ innodb_buffer_pool_size_validate(
}
srv_buf_pool_size = requested_buf_pool_size;
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (intbuf != static_cast<longlong>(requested_buf_pool_size)) {
char buf[64];
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index ed560eb52b3..da0ca1e85d9 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -126,9 +126,9 @@ struct buf_page_info_t{
built on this page */
#endif /* BTR_CUR_HASH_ADAPT */
unsigned is_old:1; /*!< TRUE if the block is in the old
- blocks in buf_pool->LRU_old */
+ blocks in buf_pool.LRU_old */
unsigned freed_page_clock:31; /*!< the value of
- buf_pool->freed_page_clock */
+ buf_pool.freed_page_clock */
unsigned zip_ssize:PAGE_ZIP_SSIZE_BITS;
/*!< Compressed page size */
unsigned page_state:BUF_PAGE_STATE_BITS; /*!< Page state */
@@ -1637,22 +1637,22 @@ i_s_cmpmem_fill_low(
buf_buddy_stat_t buddy_stat_local[BUF_BUDDY_SIZES_MAX + 1];
/* Save buddy stats for buffer pool in local variables. */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
zip_free_len_local[x] = (x < BUF_BUDDY_SIZES) ?
- UT_LIST_GET_LEN(buf_pool->zip_free[x]) : 0;
+ UT_LIST_GET_LEN(buf_pool.zip_free[x]) : 0;
- buddy_stat_local[x] = buf_pool->buddy_stat[x];
+ buddy_stat_local[x] = buf_pool.buddy_stat[x];
if (reset) {
- /* This is protected by buf_pool->mutex. */
- buf_pool->buddy_stat[x].relocated = 0;
- buf_pool->buddy_stat[x].relocated_usec = 0;
+ /* This is protected by buf_pool.mutex. */
+ buf_pool.buddy_stat[x].relocated = 0;
+ buf_pool.buddy_stat[x].relocated_usec = 0;
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
buf_buddy_stat_t* buddy_stat = &buddy_stat_local[x];
@@ -4247,7 +4247,7 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
heap = mem_heap_create(10000);
for (ulint n = 0;
- n < ut_min(buf_pool->n_chunks, buf_pool->n_chunks_new); n++) {
+ n < ut_min(buf_pool.n_chunks, buf_pool.n_chunks_new); n++) {
const buf_block_t* block;
ulint n_blocks;
buf_page_info_t* info_buffer;
@@ -4258,8 +4258,8 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
ulint block_id = 0;
/* Get buffer block of the nth chunk */
- block = buf_pool->chunks[n].blocks;
- chunk_size = buf_pool->chunks[n].size;
+ block = buf_pool.chunks[n].blocks;
+ chunk_size = buf_pool.chunks[n].size;
num_page = 0;
while (chunk_size > 0) {
@@ -4280,7 +4280,7 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
buffer pool info printout, we are not required to
preserve the overall consistency, so we can
release mutex periodically */
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
/* GO through each block in the chunk */
for (n_blocks = num_to_process; n_blocks--; block++) {
@@ -4291,7 +4291,7 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
num_page++;
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
/* Fill in information schema table with information
just collected from the buffer chunk scan */
@@ -4615,10 +4615,10 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *)
}
/* Acquire the mutex before allocating info_buffer, since
- UT_LIST_GET_LEN(buf_pool->LRU) could change */
- mutex_enter(&buf_pool->mutex);
+ UT_LIST_GET_LEN(buf_pool.LRU) could change */
+ mutex_enter(&buf_pool.mutex);
- lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
+ lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
/* Print error message if malloc fail */
info_buffer = (buf_page_info_t*) my_malloc(PSI_INSTRUMENT_ME,
@@ -4633,7 +4633,7 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *)
/* Walk through Pool's LRU list and print the buffer page
information */
- bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+ bpage = UT_LIST_GET_LAST(buf_pool.LRU);
while (bpage != NULL) {
/* Use the same function that collect buffer info for
@@ -4647,10 +4647,10 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *)
}
ut_ad(lru_pos == lru_len);
- ut_ad(lru_pos == UT_LIST_GET_LEN(buf_pool->LRU));
+ ut_ad(lru_pos == UT_LIST_GET_LEN(buf_pool.LRU));
exit:
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
if (info_buffer) {
status = i_s_innodb_buf_page_lru_fill(
@@ -7209,7 +7209,7 @@ i_s_innodb_mutexes_fill_table(
continue;
}
- if (buf_pool_is_block_mutex(mutex)) {
+ if (buf_pool.is_block_mutex(mutex)) {
block_mutex = mutex;
block_mutex_oswait_count += mutex->count_os_wait;
continue;
@@ -7257,7 +7257,7 @@ i_s_innodb_mutexes_fill_table(
continue;
}
- if (buf_pool_is_block_lock(lock)) {
+ if (buf_pool.is_block_lock(lock)) {
block_lock = lock;
block_lock_oswait_count += lock->count_os_wait;
continue;
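The two checks above fold every buffer block mutex and block lock into a single aggregated row. A hedged sketch of the underlying classification, assuming the new member functions still reduce to a pointer-range test over the pool's chunks as buf_pointer_is_block_field() did (chunk_sketch and its members are illustrative stand-ins, not the real buf_pool_t layout):

#include <cstddef>

struct chunk_sketch { const char* blocks; std::size_t bytes; };

inline bool is_block_field_sketch(const void* ptr,
                                  const chunk_sketch* chunks, std::size_t n_chunks)
{
  const char* p = static_cast<const char*>(ptr);
  for (std::size_t i = 0; i < n_chunks; i++)
    if (p >= chunks[i].blocks && p < chunks[i].blocks + chunks[i].bytes)
      return true;   // ptr points into a buf_block_t owned by this chunk
  return false;
}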
diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h
index 32386c02d5f..cc62c4cad77 100644
--- a/storage/innobase/include/btr0sea.h
+++ b/storage/innobase/include/btr0sea.h
@@ -110,7 +110,7 @@ btr_search_move_or_delete_hash_entries(
@param[in,out] block block containing index page, s- or x-latched, or an
index page for which we know that
block->buf_fix_count == 0 or it is an index page which
- has already been removed from the buf_pool->page_hash
+ has already been removed from the buf_pool.page_hash
i.e.: it is in state BUF_BLOCK_REMOVE_HASH */
void btr_search_drop_page_hash_index(buf_block_t* block);
diff --git a/storage/innobase/include/buf0buddy.h b/storage/innobase/include/buf0buddy.h
index 117e1cb8736..bd1cc06aca6 100644
--- a/storage/innobase/include/buf0buddy.h
+++ b/storage/innobase/include/buf0buddy.h
@@ -31,7 +31,7 @@ Created December 2006 by Marko Makela
/**
@param[in] block size in bytes
-@return index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */
+@return index of buf_pool.zip_free[], or BUF_BUDDY_SIZES */
inline
ulint
buf_buddy_get_slot(ulint size)
@@ -50,16 +50,16 @@ buf_buddy_get_slot(ulint size)
}
/** Allocate a ROW_FORMAT=COMPRESSED block.
-@param[in] i index of buf_pool->zip_free[] or BUF_BUDDY_SIZES
-@param[out] lru whether buf_pool->mutex was temporarily released
+@param[in] i index of buf_pool.zip_free[] or BUF_BUDDY_SIZES
+@param[out] lru whether buf_pool.mutex was temporarily released
@return allocated block, never NULL */
byte *buf_buddy_alloc_low(ulint i, bool *lru) MY_ATTRIBUTE((malloc));
/** Allocate a ROW_FORMAT=COMPRESSED block.
-The caller must not hold buf_pool->mutex nor buf_pool->zip_mutex nor any
+The caller must not hold buf_pool.mutex nor buf_pool.zip_mutex nor any
block->mutex.
@param[in] size compressed page size
-@param[out] lru whether buf_pool->mutex was temporarily released
+@param[out] lru whether buf_pool.mutex was temporarily released
@return allocated block, never NULL */
inline byte *buf_buddy_alloc(ulint size, bool *lru= nullptr)
{
@@ -69,7 +69,7 @@ inline byte *buf_buddy_alloc(ulint size, bool *lru= nullptr)
/** Deallocate a block.
@param[in] buf block to be freed, must not be pointed to
by the buffer pool
-@param[in] i index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */
+@param[in] i index of buf_pool.zip_free[], or BUF_BUDDY_SIZES */
void buf_buddy_free_low(void* buf, ulint i);
/** Deallocate a block.
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index 0912308ffcc..8bf9269418f 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -74,16 +74,6 @@ struct fil_addr_t;
#define MAX_PAGE_HASH_LOCKS 1024 /*!< The maximum number of
page_hash locks */
-extern buf_pool_t* buf_pool; /*!< The buffer pools
- of the database */
-
-extern volatile bool buf_pool_withdrawing; /*!< true when withdrawing buffer
- pool pages might cause page relocation */
-
-extern volatile ulint buf_withdraw_clock; /*!< the clock is incremented
- every time a pointer to a page may
- become obsolete */
-
# ifdef UNIV_DEBUG
extern my_bool buf_disable_resize_buffer_pool_debug; /*!< if TRUE, resizing
buffer pool is not allowed. */
@@ -95,12 +85,12 @@ extern my_bool buf_disable_resize_buffer_pool_debug; /*!< if TRUE, resizing
The enumeration values must be 0..7. */
enum buf_page_state {
BUF_BLOCK_POOL_WATCH, /*!< a sentinel for the buffer pool
- watch, element of buf_pool->watch[] */
+ watch, element of buf_pool.watch[] */
BUF_BLOCK_ZIP_PAGE, /*!< contains a clean
compressed page */
BUF_BLOCK_ZIP_DIRTY, /*!< contains a compressed
page that is in the
- buf_pool->flush_list */
+ buf_pool.flush_list */
BUF_BLOCK_NOT_USED, /*!< is in the free list;
must be after the BUF_BLOCK_ZIP_
@@ -121,13 +111,13 @@ struct buf_pool_info_t
{
/* General buffer pool info */
ulint pool_size; /*!< Buffer Pool size in pages */
- ulint lru_len; /*!< Length of buf_pool->LRU */
- ulint old_lru_len; /*!< buf_pool->LRU_old_len */
- ulint free_list_len; /*!< Length of buf_pool->free list */
- ulint flush_list_len; /*!< Length of buf_pool->flush_list */
- ulint n_pend_unzip; /*!< buf_pool->n_pend_unzip, pages
+ ulint lru_len; /*!< Length of buf_pool.LRU */
+ ulint old_lru_len; /*!< buf_pool.LRU_old_len */
+ ulint free_list_len; /*!< Length of buf_pool.free list */
+ ulint flush_list_len; /*!< Length of buf_pool.flush_list */
+ ulint n_pend_unzip; /*!< buf_pool.n_pend_unzip, pages
pending decompress */
- ulint n_pend_reads; /*!< buf_pool->n_pend_reads, pages
+ ulint n_pend_reads; /*!< buf_pool.n_pend_reads, pages
pending read */
ulint n_pending_flush_lru; /*!< Pages pending flush in LRU */
ulint n_pending_flush_single_page;/*!< Pages pending to be
@@ -138,15 +128,15 @@ struct buf_pool_info_t
LIST */
ulint n_pages_made_young; /*!< number of pages made young */
ulint n_pages_not_made_young; /*!< number of pages not made young */
- ulint n_pages_read; /*!< buf_pool->n_pages_read */
- ulint n_pages_created; /*!< buf_pool->n_pages_created */
- ulint n_pages_written; /*!< buf_pool->n_pages_written */
- ulint n_page_gets; /*!< buf_pool->n_page_gets */
- ulint n_ra_pages_read_rnd; /*!< buf_pool->n_ra_pages_read_rnd,
+ ulint n_pages_read; /*!< buf_pool.n_pages_read */
+ ulint n_pages_created; /*!< buf_pool.n_pages_created */
+ ulint n_pages_written; /*!< buf_pool.n_pages_written */
+ ulint n_page_gets; /*!< buf_pool.n_page_gets */
+ ulint n_ra_pages_read_rnd; /*!< buf_pool.n_ra_pages_read_rnd,
number of pages readahead */
- ulint n_ra_pages_read; /*!< buf_pool->n_ra_pages_read, number
+ ulint n_ra_pages_read; /*!< buf_pool.n_ra_pages_read, number
of pages readahead */
- ulint n_ra_pages_evicted; /*!< buf_pool->n_ra_pages_evicted,
+ ulint n_ra_pages_evicted; /*!< buf_pool.n_ra_pages_evicted,
number of readahead pages evicted
without access */
ulint n_page_get_delta; /*!< num of buffer pool page gets since
@@ -176,7 +166,7 @@ struct buf_pool_info_t
without access, in pages per second */
/* Stats about LRU eviction */
- ulint unzip_lru_len; /*!< length of buf_pool->unzip_LRU
+ ulint unzip_lru_len; /*!< length of buf_pool.unzip_LRU
list */
/* Counters for LRU policy */
ulint io_sum; /*!< buf_LRU_stat_sum.io */
@@ -199,28 +189,6 @@ operator<<(
const page_id_t page_id);
#ifndef UNIV_INNOCHECKSUM
-/** Create the buffer pool.
-@return whether the creation failed */
-bool buf_pool_init();
-/** Free the buffer pool at shutdown.
-This must not be invoked before freeing all mutexes. */
-void buf_pool_free();
-
-/** Determines if a block is intended to be withdrawn.
-@param[in] block pointer to control block
-@retval true if will be withdrawn */
-bool buf_block_will_be_withdrawn(const buf_block_t* block);
-
-/** Determines if a frame is intended to be withdrawn.
-@param[in] ptr pointer to a frame
-@retval true if will be withdrawn */
-bool buf_frame_will_be_withdrawn(const byte* ptr);
-
-#ifdef BTR_CUR_HASH_ADAPT
-/** Clear the adaptive hash index on all pages in the buffer pool. */
-void buf_pool_clear_hash_index();
-#endif /* BTR_CUR_HASH_ADAPT */
-
/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */
@@ -228,13 +196,6 @@ UNIV_INLINE
ulint
buf_pool_get_curr_size(void);
/*========================*/
-/*********************************************************************//**
-Gets the current size of buffer buf_pool in frames.
-@return size in pages */
-UNIV_INLINE
-ulint
-buf_pool_get_n_pages(void);
-/*=======================*/
/**
@return the smallest oldest_modification lsn for any page.
@retval 0 if all modified persistent pages have been flushed */
@@ -455,7 +416,7 @@ inline void buf_page_make_young_if_needed(buf_page_t *bpage)
/********************************************************************//**
Increments the modify clock of a frame by 1. The caller must (1) own the
-buf_pool->mutex and block bufferfix count has to be zero, (2) or own an x-lock
+buf_pool.mutex and block bufferfix count has to be zero, (2) or own an x-lock
on the block. */
UNIV_INLINE
void
@@ -660,15 +621,6 @@ buf_frame_align(
byte* ptr); /* in: pointer to a frame */
-#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
-/** Validate the buffer pool. */
-void buf_validate();
-#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
-#if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
-/** Write information of the buf_pool to the error log. */
-void buf_print();
-#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
-
/** Dump a page to stderr.
@param[in] read_buf database page
@param[in] zip_size compressed page size, or 0 */
@@ -700,9 +652,6 @@ void buf_stats_get_pool_info(buf_pool_info_t *pool_info);
/** Refresh the statistics used to print per-second averages. */
void buf_refresh_io_stats();
-/** Assert that all buffer pool pages are in a replaceable state */
-void buf_assert_all_freed();
-
/** Check that there currently are no I/O operations pending.
@return number of pending i/o */
ulint buf_pool_check_no_pending_io();
@@ -857,7 +806,7 @@ buf_block_set_io_fix(
enum buf_io_fix io_fix);/*!< in: io_fix state */
/*********************************************************************//**
Makes a block sticky. A sticky block implies that even after we release
-the buf_pool->mutex and the block->mutex:
+the buf_pool.mutex and the block->mutex:
* it cannot be removed from the flush_list
* the block descriptor cannot be relocated
* it cannot be removed from the LRU list
@@ -954,31 +903,6 @@ if applicable. */
#define is_buf_block_get_page_zip(block) \
UNIV_LIKELY_NULL((block)->page.zip.data)
-#ifdef BTR_CUR_HASH_ADAPT
-/** Get a buffer block from an adaptive hash index pointer.
-This function does not return if the block is not identified.
-@param[in] ptr pointer to within a page frame
-@return pointer to block, never NULL */
-buf_block_t*
-buf_block_from_ahi(const byte* ptr);
-#endif /* BTR_CUR_HASH_ADAPT */
-
-/** Determine if a pointer belongs to a buf_block_t. It can be a pointer to
-the buf_block_t itself or a member of it.
-@param ptr a pointer that will not be dereferenced
-@return whether the ptr belongs to a buf_block_t struct */
-bool buf_pointer_is_block_field(const void* ptr);
-/** Find out if a pointer corresponds to a buf_block_t::mutex.
-@param m in: mutex candidate
-@return TRUE if m is a buf_block_t::mutex */
-#define buf_pool_is_block_mutex(m) \
- buf_pointer_is_block_field((const void*)(m))
-/** Find out if a pointer corresponds to a buf_block_t::lock.
-@param l in: rw-lock candidate
-@return TRUE if l is a buf_block_t::lock */
-#define buf_pool_is_block_lock(l) \
- buf_pointer_is_block_field((const void*)(l))
-
/** Initialize a page for read to the buffer buf_pool. If the page is
(1) already in buf_pool, or
(2) if we specify to read only ibuf pages and the page is not an ibuf page, or
@@ -1111,14 +1035,6 @@ has returned NULL and before invoking buf_pool_watch_unset(space,offset).
bool buf_pool_watch_occurred(const page_id_t page_id)
MY_ATTRIBUTE((warn_unused_result));
-/** Verify the possibility that a stored page is not in buffer pool.
-@param[in] withdraw_clock withdraw clock when stored the page
-@retval true if the page might be relocated */
-UNIV_INLINE
-bool
-buf_pool_is_obsolete(
- ulint withdraw_clock);
-
/** Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit,
if needed.
@param[in] size size in bytes
@@ -1147,42 +1063,35 @@ void buf_flush_update_zip_checksum(buf_frame_t* page, ulint size);
NOTE! The definition appears here only for other modules of this
directory (buf) to see it. Do not use from outside! */
-class buf_tmp_buffer_t {
- /** whether this slot is reserved */
- std::atomic<bool> reserved;
+class buf_tmp_buffer_t
+{
+ /** whether this slot is reserved */
+ std::atomic<bool> reserved;
public:
- byte* crypt_buf; /*!< for encryption the data needs to be
- copied to a separate buffer before it's
- encrypted&written. this as a page can be
- read while it's being flushed */
- byte* comp_buf; /*!< for compression we need
- temporal buffer because page
- can be read while it's being flushed */
- byte* out_buf; /*!< resulting buffer after
- encryption/compression. This is a
- pointer and not allocated. */
-
- /** Release the slot */
- void release()
- {
- reserved.store(false, std::memory_order_relaxed);
- }
-
- /** Acquire the slot
- @return whether the slot was acquired */
- bool acquire()
- {
- return !reserved.exchange(true, std::memory_order_relaxed);
- }
-
- /** Allocate a buffer for encryption, decryption or decompression. */
- void allocate()
- {
- if (!crypt_buf) {
- crypt_buf= static_cast<byte*>(
- aligned_malloc(srv_page_size, srv_page_size));
- }
- }
+ /** For encryption, the data needs to be copied to a separate buffer
+ before it's encrypted&written. The buffer block itself can be replaced
+ while a write of crypt_buf to file is in progress. */
+ byte *crypt_buf;
+ /** buffer for fil_page_compress(), for flushing page_compressed pages */
+ byte *comp_buf;
+ /** pointer to resulting buffer after encryption or compression;
+ not separately allocated memory */
+ byte *out_buf;
+
+ /** Release the slot */
+ void release() { reserved.store(false, std::memory_order_relaxed); }
+
+ /** Acquire the slot
+ @return whether the slot was acquired */
+ bool acquire() { return !reserved.exchange(true, std::memory_order_relaxed);}
+
+ /** Allocate a buffer for encryption, decryption or decompression. */
+ void allocate()
+ {
+ if (!crypt_buf)
+ crypt_buf= static_cast<byte*>
+ (aligned_malloc(srv_page_size, srv_page_size));
+ }
};
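The acquire()/release() pair above implements a simple try-lock on a slot: exchange(true) returns the previous value, so only the thread that flips reserved from false to true gets the slot. A minimal standalone illustration of the same pattern (not the InnoDB I/O slot array itself):

#include <array>
#include <atomic>

struct slot { std::atomic<bool> reserved{false}; };

// Reserve the first free slot, or return nullptr if all are busy.
inline slot* reserve_any(std::array<slot, 8>& slots)
{
  for (auto& s : slots)
    if (!s.reserved.exchange(true, std::memory_order_relaxed))
      return &s;        // this thread flipped false -> true and owns the slot
  return nullptr;       // caller may retry, wait, or fall back
}

// Releasing is just s->reserved.store(false, std::memory_order_relaxed).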
/** The common buffer control block structure
@@ -1196,22 +1105,22 @@ public:
/** @name General fields
None of these bit-fields must be modified without holding
buf_page_get_mutex() [buf_block_t::mutex or
- buf_pool->zip_mutex], since they can be stored in the same
+ buf_pool.zip_mutex], since they can be stored in the same
machine word. Some of these fields are additionally protected
- by buf_pool->mutex. */
+ by buf_pool.mutex. */
/* @{ */
/** Page id. Protected by buf_pool mutex. */
page_id_t id;
buf_page_t* hash; /*!< node used in chaining to
- buf_pool->page_hash or
- buf_pool->zip_hash */
+ buf_pool.page_hash or
+ buf_pool.zip_hash */
/** Count of how manyfold this block is currently bufferfixed. */
Atomic_counter<uint32_t> buf_fix_count;
/** type of pending I/O operation; also protected by
- buf_pool->mutex for writes only */
+ buf_pool.mutex for writes only */
buf_io_fix io_fix;
/** Block state. @see buf_page_in_file */
@@ -1224,10 +1133,10 @@ public:
/* @} */
page_zip_des_t zip; /*!< compressed page; zip.data
(but not the data it points to) is
- also protected by buf_pool->mutex;
+ also protected by buf_pool.mutex;
state == BUF_BLOCK_ZIP_PAGE and
zip.data == NULL means an active
- buf_pool->watch */
+ buf_pool.watch */
ulint write_size; /* Write size is set when this
page is first time written and then
@@ -1244,19 +1153,19 @@ public:
used for encryption/compression
or NULL */
#ifdef UNIV_DEBUG
- ibool in_page_hash; /*!< TRUE if in buf_pool->page_hash */
- ibool in_zip_hash; /*!< TRUE if in buf_pool->zip_hash */
+ ibool in_page_hash; /*!< TRUE if in buf_pool.page_hash */
+ ibool in_zip_hash; /*!< TRUE if in buf_pool.zip_hash */
#endif /* UNIV_DEBUG */
/** @name Page flushing fields
- All these are protected by buf_pool->mutex. */
+ All these are protected by buf_pool.mutex. */
/* @{ */
UT_LIST_NODE_T(buf_page_t) list;
/*!< based on state, this is a
list node, protected either by
- buf_pool->mutex or by
- buf_pool->flush_list_mutex,
+ buf_pool.mutex or by
+ buf_pool.flush_list_mutex,
in one of the following lists in
buf_pool:
@@ -1267,9 +1176,9 @@ public:
If bpage is part of flush_list
then the node pointers are
- covered by buf_pool->flush_list_mutex.
+ covered by buf_pool.flush_list_mutex.
Otherwise these pointers are
- protected by buf_pool->mutex.
+ protected by buf_pool.mutex.
The contents of the list node
is undefined if !in_flush_list
@@ -1280,19 +1189,19 @@ public:
BUF_BLOCK_READY_IN_USE. */
#ifdef UNIV_DEBUG
- ibool in_flush_list; /*!< TRUE if in buf_pool->flush_list;
- when buf_pool->flush_list_mutex is
+ ibool in_flush_list; /*!< TRUE if in buf_pool.flush_list;
+ when buf_pool.flush_list_mutex is
free, the following should hold:
in_flush_list
== (state == BUF_BLOCK_FILE_PAGE
|| state == BUF_BLOCK_ZIP_DIRTY)
Writes to this field must be
covered by both block->mutex
- and buf_pool->flush_list_mutex. Hence
+ and buf_pool.flush_list_mutex. Hence
reads can happen while holding
any one of the two mutexes */
- ibool in_free_list; /*!< TRUE if in buf_pool->free; when
- buf_pool->mutex is free, the following
+ ibool in_free_list; /*!< TRUE if in buf_pool.free; when
+ buf_pool.mutex is free, the following
should hold: in_free_list
== (state == BUF_BLOCK_NOT_USED) */
#endif /* UNIV_DEBUG */
@@ -1307,13 +1216,13 @@ public:
modifications are on disk.
Writes to this field must be
covered by both block->mutex
- and buf_pool->flush_list_mutex. Hence
+ and buf_pool.flush_list_mutex. Hence
reads can happen while holding
any one of the two mutexes */
/* @} */
/** @name LRU replacement algorithm fields
- These fields are protected by buf_pool->mutex only (not
- buf_pool->zip_mutex or buf_block_t::mutex). */
+ These fields are protected by buf_pool.mutex only (not
+ buf_pool.zip_mutex or buf_block_t::mutex). */
/* @{ */
UT_LIST_NODE_T(buf_page_t) LRU;
@@ -1324,9 +1233,9 @@ public:
debugging */
#endif /* UNIV_DEBUG */
unsigned old:1; /*!< TRUE if the block is in the old
- blocks in buf_pool->LRU_old */
+ blocks in buf_pool.LRU_old */
unsigned freed_page_clock:31;/*!< the value of
- buf_pool->freed_page_clock
+ buf_pool.freed_page_clock
when this block was the last
time put to the head of the
LRU list; a thread is allowed
@@ -1398,7 +1307,7 @@ struct buf_block_t{
buf_page_t page; /*!< page information; this must
be the first field, so that
- buf_pool->page_hash can point
+ buf_pool.page_hash can point
to buf_page_t or buf_block_t */
byte* frame; /*!< pointer to buffer frame which
is of size srv_page_size, and
@@ -1420,7 +1329,7 @@ struct buf_block_t{
uint32_t lock_hash_val; /*!< hashed value of the page address
in the record lock hash table;
protected by buf_block_t::lock
- (or buf_block_t::mutex, buf_pool->mutex
+ (or buf_block_t::mutex, buf_pool.mutex
in buf_page_get_gen(),
buf_page_init_for_read()
and buf_page_create()) */
@@ -1470,7 +1379,7 @@ struct buf_block_t{
An exception to this is when we init or create a page
in the buffer pool in buf0buf.cc.
- Another exception for buf_pool_clear_hash_index() is that
+ Another exception for buf_pool_t::clear_hash_index() is that
assigning block->index = NULL (and block->n_pointers = 0)
is allowed whenever btr_search_own_all(RW_LOCK_X).
@@ -1486,7 +1395,7 @@ struct buf_block_t{
and holding some latch prevents the state from changing to that.
Some use of assert_block_ahi_empty() or assert_block_ahi_valid()
- is prone to race conditions while buf_pool_clear_hash_index() is
+ is prone to race conditions while buf_pool_t::clear_hash_index() is
executing (the adaptive hash index is being disabled). Such use
is explicitly commented. */
@@ -1571,7 +1480,7 @@ struct buf_block_t{
/**********************************************************************//**
-Compute the hash fold value for blocks in buf_pool->zip_hash. */
+Compute the hash fold value for blocks in buf_pool.zip_hash. */
/* @{ */
#define BUF_POOL_ZIP_FOLD_PTR(ptr) (ulint(ptr) >> srv_page_size_shift)
#define BUF_POOL_ZIP_FOLD(b) BUF_POOL_ZIP_FOLD_PTR((b)->frame)
@@ -1585,112 +1494,97 @@ even after we release the buffer pool mutex. */
class HazardPointer
{
public:
- /** Constructor
- @param mutex mutex that is protecting the hp. */
- HazardPointer(const ib_mutex_t* ut_d(mutex)) :
-#ifdef UNIV_DEBUG
- m_mutex(mutex),
-#endif
- m_hp() {}
+ virtual ~HazardPointer() {}
- /** Destructor */
- virtual ~HazardPointer() {}
+ /** @return current value */
+ buf_page_t *get() const { ut_ad(mutex_own(m_mutex)); return m_hp; }
- /** Get current value */
- buf_page_t* get() const
- {
- ut_ad(mutex_own(m_mutex));
- return(m_hp);
- }
-
- /** Set current value
- @param bpage buffer block to be set as hp */
- void set(buf_page_t* bpage);
-
- /** Checks if a bpage is the hp
- @param bpage buffer block to be compared
- @return true if it is hp */
- bool is_hp(const buf_page_t* bpage) const
- {
- ut_ad(mutex_own(m_mutex));
- return bpage == m_hp;
- }
+ /** Set current value
+ @param bpage buffer block to be set as hp */
+ void set(buf_page_t *bpage)
+ {
+ ut_ad(mutex_own(m_mutex));
+ ut_ad(!bpage || buf_page_in_file(bpage));
+ m_hp= bpage;
+ }
- /** Adjust the value of hp. This happens when some
- other thread working on the same list attempts to
- remove the hp from the list. Must be implemented
- by the derived classes.
- @param bpage buffer block to be compared */
- virtual void adjust(const buf_page_t*) = 0;
+ /** Checks if a bpage is the hp
+ @param bpage buffer block to be compared
+ @return true if it is hp */
+ bool is_hp(const buf_page_t *bpage) const
+ { ut_ad(mutex_own(m_mutex)); return bpage == m_hp; }
-protected:
- /** Disable copying */
- HazardPointer(const HazardPointer&);
- HazardPointer& operator=(const HazardPointer&);
+ /** Adjust the value of hp. This happens when some
+ other thread working on the same list attempts to
+ remove the hp from the list. */
+ virtual void adjust(const buf_page_t*) = 0;
#ifdef UNIV_DEBUG
- /** mutex that protects access to the m_hp. */
- const ib_mutex_t* m_mutex;
+ /** mutex that protects access to the m_hp. */
+ const ib_mutex_t *m_mutex= nullptr;
#endif /* UNIV_DEBUG */
- /** hazard pointer. */
- buf_page_t* m_hp;
+protected:
+ /** hazard pointer */
+ buf_page_t *m_hp= nullptr;
};
-/** Class implementing buf_pool->flush_list hazard pointer */
-class FlushHp: public HazardPointer {
-
+/** Class implementing buf_pool.flush_list hazard pointer */
+class FlushHp : public HazardPointer
+{
public:
- /** Constructor
- @param mutex mutex that is protecting the hp. */
- FlushHp(const ib_mutex_t* mutex) : HazardPointer(mutex) {}
-
- /** Destructor */
- ~FlushHp() override {}
-
- /** Adjust the value of hp. This happens when some
- other thread working on the same list attempts to
- remove the hp from the list.
- @param bpage buffer block to be compared */
- void adjust(const buf_page_t* bpage) override;
-};
+ ~FlushHp() override {}
+
+ /** Adjust the value of hp. This happens when some
+ other thread working on the same list attempts to
+ remove the hp from the list.
+ @param bpage buffer block to be compared */
+ void adjust(const buf_page_t *bpage) override
+ {
+ ut_ad(bpage != NULL);
-/** Class implementing buf_pool->LRU hazard pointer */
-class LRUHp: public HazardPointer {
+ /* We only support reverse traversal for now. */
+ if (is_hp(bpage))
+ m_hp= UT_LIST_GET_PREV(list, m_hp);
+ ut_ad(!m_hp || m_hp->in_flush_list);
+ }
+};
+
+/** Class implementing buf_pool.LRU hazard pointer */
+class LRUHp : public HazardPointer {
public:
- /** Constructor
- @param mutex mutex that is protecting the hp. */
- LRUHp(const ib_mutex_t* mutex) : HazardPointer(mutex) {}
-
- /** Destructor */
- ~LRUHp() override {}
-
- /** Adjust the value of hp. This happens when some
- other thread working on the same list attempts to
- remove the hp from the list.
- @param bpage buffer block to be compared */
- void adjust(const buf_page_t* bpage) override;
+ ~LRUHp() override {}
+
+ /** Adjust the value of hp. This happens when some
+ other thread working on the same list attempts to
+ remove the hp from the list.
+ @param bpage buffer block to be compared */
+ void adjust(const buf_page_t *bpage) override
+ {
+ ut_ad(bpage);
+ /* We only support reverse traversal for now. */
+ if (is_hp(bpage))
+ m_hp= UT_LIST_GET_PREV(LRU, m_hp);
+
+ ut_ad(!m_hp || m_hp->in_LRU_list);
+ }
};
/** Special purpose iterators to be used when scanning the LRU list.
The idea is that when one thread finishes the scan it leaves the
itr in that position and the other thread can start scan from
there */
-class LRUItr: public LRUHp {
+class LRUItr : public LRUHp {
public:
- /** Constructor
- @param mutex mutex that is protecting the hp. */
- LRUItr(const ib_mutex_t* mutex) : LRUHp(mutex) {}
-
- /** Destructor */
- ~LRUItr() override {}
-
- /** Select from where to start a scan. If we have scanned
- too deep into the LRU list it resets the value to the tail
- of the LRU list.
- @return buf_page_t from where to start scan. */
- inline buf_page_t* start();
+ LRUItr() : LRUHp() {}
+ ~LRUItr() override {}
+
+ /** Select from where to start a scan. If we have scanned
+ too deep into the LRU list it resets the value to the tail
+ of the LRU list.
+ @return buf_page_t from where to start scan. */
+ inline buf_page_t *start();
};
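The purpose of HazardPointer and its subclasses is to let a scan position survive a temporary release of the list mutex: a thread publishes the node it intends to visit next, and any thread that removes that node from the same list must call adjust() so the pointer moves to the previous element instead of dangling. A rough sketch of the flush-list pattern, assuming a FlushHp instance such as the flush_hp member that buf_pool keeps elsewhere (not shown in this hunk):

  mutex_enter(&buf_pool.flush_list_mutex);
  buf_page_t *bpage= UT_LIST_GET_LAST(buf_pool.flush_list);
  while (bpage)
  {
    buf_page_t *prev= UT_LIST_GET_PREV(list, bpage);
    hp.set(prev);                          // remember where to resume
    mutex_exit(&buf_pool.flush_list_mutex);
    /* ... flush bpage; a thread removing 'prev' calls hp.adjust(prev),
       which moves the hazard pointer one node further back ... */
    mutex_enter(&buf_pool.flush_list_mutex);
    bpage= hp.get();                       // safe even if 'prev' was removed
  }
  hp.set(nullptr);
  mutex_exit(&buf_pool.flush_list_mutex);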
/** Struct that is embedded in the free zip blocks */
@@ -1753,8 +1647,218 @@ struct buf_buddy_stat_t {
};
/** The buffer pool */
-struct buf_pool_t
+class buf_pool_t
{
+ /** A chunk of buffers */
+ struct chunk_t
+ {
+ /** number of elements in blocks[] */
+ size_t size;
+ /** memory allocated for the page frames */
+ unsigned char *mem;
+ /** descriptor of mem */
+ ut_new_pfx_t mem_pfx;
+ /** array of buffer control blocks */
+ buf_block_t *blocks;
+
+ /** Map of first page frame address to chunks[] */
+ using map= std::map<const void*, chunk_t*, std::less<const void*>,
+ ut_allocator<std::pair<const void* const,chunk_t*>>>;
+ /** Chunk map that may be under construction by buf_resize_thread() */
+ static map *map_reg;
+ /** Current chunk map for lookup only */
+ static map *map_ref;
+
+ /** @return the memory size in bytes */
+ size_t mem_size() const { return mem_pfx.m_size; }
+
+ /** Register the chunk */
+ void reg() { map_reg->emplace(map::value_type(blocks->frame, this)); }
+
+ /** Allocate a chunk of buffer frames.
+ @param bytes requested size
+ @return whether the allocation succeeded */
+ inline bool create(size_t bytes);
+
+#ifdef UNIV_DEBUG
+ /** Find a block that points to a ROW_FORMAT=COMPRESSED page
+ @param data pointer to the start of a ROW_FORMAT=COMPRESSED page frame
+ @return the block
+ @retval nullptr if not found */
+ const buf_block_t *contains_zip(const void *data) const
+ {
+ const buf_block_t *block= blocks;
+ for (auto i= size; i--; block++)
+ if (block->page.zip.data == data)
+ return block;
+ return nullptr;
+ }
+
+ /** Check that all blocks are in a replaceable state.
+ @return address of a non-free block
+ @retval nullptr if all freed */
+ inline const buf_block_t *not_freed() const;
+#endif /* UNIV_DEBUG */
+ };
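The static chunk map is what makes a cheap reverse lookup from an arbitrary frame pointer to its owning chunk possible. Purely as an illustration (the committed lookup lives in buf_pool_t::block_from_ahi(), whose definition is not shown here), a member function could resolve a pointer roughly like this:

  /* inside a buf_pool_t member function; chunk_t is private to buf_pool_t */
  auto it= chunk_t::map_ref->upper_bound(ptr);  // first chunk starting above ptr
  ut_a(it != chunk_t::map_ref->begin());
  const chunk_t *chunk= (--it)->second;         // candidate chunk owning ptr
  ut_ad(ptr < chunk->blocks[chunk->size - 1].frame + srv_page_size);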
+
+ /** Withdraw blocks from the buffer pool until meeting withdraw_target.
+ @return whether retry is needed */
+ inline bool withdraw_blocks();
+
+ /** Determine if a pointer belongs to a buf_block_t. It can be a pointer to
+ the buf_block_t itself or a member of it.
+ @param ptr a pointer that will not be dereferenced
+ @return whether the ptr belongs to a buf_block_t struct */
+ bool is_block_field(const void *ptr) const
+ {
+ const chunk_t *chunk= chunks;
+ const chunk_t *const echunk= chunk + ut_min(n_chunks, n_chunks_new);
+
+ /* TODO: protect chunks with a mutex (the older pointer will
+ currently remain during resize()) */
+ for (; chunk < echunk; chunk++)
+ if (ptr >= reinterpret_cast<const void*>(chunk->blocks) &&
+ ptr < reinterpret_cast<const void*>(chunk->blocks + chunk->size))
+ return true;
+ return false;
+ }
+
+ /** Try to reallocate a control block.
+ @param block control block to reallocate
+ @return whether the reallocation succeeded */
+ inline bool realloc(buf_block_t *block);
+
+public:
+ bool is_initialised() const { return chunks != nullptr; }
+
+ /** Create the buffer pool.
+ @return whether the creation failed */
+ bool create();
+
+ /** Clean up after successful create() */
+ void close();
+
+ /** Resize from srv_buf_pool_old_size to srv_buf_pool_size. */
+ inline void resize();
+
+ /** @return whether resize() is in progress */
+ bool resize_in_progress() const
+ {
+ return UNIV_UNLIKELY(resizing.load(std::memory_order_relaxed) ||
+ withdrawing.load(std::memory_order_relaxed));
+ }
+
+ /** @return the withdraw_clock */
+ ulint withdraw_clock() const
+ { return withdraw_clock_.load(std::memory_order_relaxed); }
+
+ /** Check whether a stored page reference may have become stale, i.e.
+ the page may no longer be in the buffer pool.
+ @param withdraw_clock the withdraw clock value recorded with the reference
+ @return whether the page might have been relocated */
+ bool is_obsolete(ulint withdraw_clock) const
+ {
+ return UNIV_UNLIKELY(withdrawing.load(std::memory_order_relaxed) ||
+ this->withdraw_clock() != withdraw_clock);
+ }
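The withdraw clock exists so that a cached block guess can be invalidated without dereferencing it: the caller records withdraw_clock() when it caches the pointer and later passes the recorded value to is_obsolete(). The undo-log code changed at the end of this diff (trx0rec.cc, trx0undo.cc) is the real user; the sketch below only restates that pattern with illustrative variable names:

  /* caching side */
  undo->guess_block= block;
  undo->withdraw_clock= buf_pool.withdraw_clock();

  /* lookup side: trust the guess only if no withdrawal may have
     relocated the block since the clock value was recorded */
  block= buf_page_get_gen(page_id, 0, RW_X_LATCH,
                          buf_pool.is_obsolete(undo->withdraw_clock)
                          ? NULL : undo->guess_block,
                          BUF_GET, __FILE__, __LINE__, mtr, err);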
+
+ /** @return the current size of the buffer pool in pages */
+ size_t get_n_pages() const
+ {
+ ut_ad(is_initialised());
+ size_t size= 0;
+ for (auto j= n_chunks; j--; )
+ size+= chunks[j].size;
+ return size;
+ }
+
+ /** Determine whether a frame is intended to be withdrawn during resize().
+ @param ptr pointer within a buf_block_t::frame
+ @return whether the frame will be withdrawn */
+ bool will_be_withdrawn(const byte *ptr) const
+ {
+ ut_ad(curr_size < old_size);
+ ut_ad(!resizing.load(std::memory_order_relaxed) || mutex_own(&mutex));
+
+ for (const chunk_t *chunk= chunks + n_chunks_new,
+ * const echunk= chunks + n_chunks;
+ chunk != echunk; chunk++)
+ if (ptr >= chunk->blocks->frame &&
+ ptr < (chunk->blocks + chunk->size - 1)->frame + srv_page_size)
+ return true;
+ return false;
+ }
+
+ /** Determine whether a block is intended to be withdrawn during resize().
+ @param bpage buffer pool block
+ @return whether the block will be withdrawn */
+ bool will_be_withdrawn(const buf_page_t &bpage) const
+ {
+ ut_ad(curr_size < old_size);
+ ut_ad(!resizing.load(std::memory_order_relaxed) || mutex_own(&mutex));
+
+ for (const chunk_t *chunk= chunks + n_chunks_new,
+ * const echunk= chunks + n_chunks;
+ chunk != echunk; chunk++)
+ if (&bpage >= &chunk->blocks->page &&
+ &bpage < &chunk->blocks[chunk->size].page)
+ return true;
+ return false;
+ }
+
+#ifdef UNIV_DEBUG
+ /** Find a block that points to a ROW_FORMAT=COMPRESSED page
+ @param data pointer to the start of a ROW_FORMAT=COMPRESSED page frame
+ @return the block
+ @retval nullptr if not found */
+ const buf_block_t *contains_zip(const void *data) const
+ {
+ ut_ad(mutex_own(&mutex));
+ for (const chunk_t *chunk= chunks, * const end= chunks + n_chunks;
+ chunk != end; chunk++)
+ if (const buf_block_t *block= chunk->contains_zip(data))
+ return block;
+ return nullptr;
+ }
+
+ /** Assert that all buffer pool pages are in a replaceable state */
+ void assert_all_freed();
+#endif /* UNIV_DEBUG */
+
+#ifdef BTR_CUR_HASH_ADAPT
+ /** Clear the adaptive hash index on all pages in the buffer pool. */
+ inline void clear_hash_index();
+
+ /** Get a buffer block from an adaptive hash index pointer.
+ This function does not return if the block is not identified.
+ @param ptr pointer to within a page frame
+ @return pointer to block, never NULL */
+ inline buf_block_t* block_from_ahi(const byte *ptr) const;
+#endif /* BTR_CUR_HASH_ADAPT */
+
+ bool is_block_mutex(const BPageMutex *m) const
+ { return is_block_field(reinterpret_cast<const void*>(m)); }
+ bool is_block_lock(const BPageLock *l) const
+ { return is_block_field(reinterpret_cast<const void*>(l)); }
+
+ /** Determine if a buffer block was created by chunk_t::create().
+ @param block block descriptor (not dereferenced)
+ @return whether block has been created by chunk_t::create() */
+ bool is_uncompressed(const buf_block_t *block) const
+ {
+ /* The pointer should be aligned. */
+ return !(size_t(block) % sizeof *block) &&
+ is_block_field(reinterpret_cast<const void*>(block));
+ }
+
+#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
+ /** Validate the buffer pool. */
+ void validate();
+#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
+#if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
+ /** Write information of the buf_pool to the error log. */
+ void print();
+#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
+
/** @name General fields */
/* @{ */
BufPoolMutex mutex; /*!< Buffer pool mutex */
@@ -1776,8 +1880,8 @@ struct buf_pool_t
member. */
volatile ulint n_chunks; /*!< number of buffer pool chunks */
volatile ulint n_chunks_new; /*!< new number of buffer pool chunks */
- buf_chunk_t* chunks; /*!< buffer pool chunks */
- buf_chunk_t* chunks_old; /*!< old buffer pool chunks to be freed
+ chunk_t* chunks; /*!< buffer pool chunks */
+ chunk_t* chunks_old; /*!< old buffer pool chunks to be freed
after resizing buffer pool */
ulint curr_size; /*!< current pool size in pages */
ulint old_size; /*!< previous pool size in pages */
@@ -1791,9 +1895,9 @@ struct buf_pool_t
page_hash is protected by an
array of mutexes.
Changes in page_hash are protected
- by buf_pool->mutex and the relevant
+ by buf_pool.mutex and the relevant
page_hash mutex. Lookups can happen
- while holding the buf_pool->mutex or
+ while holding the buf_pool.mutex or
the relevant page_hash mutex. */
hash_table_t* page_hash_old; /*!< old pointer to page_hash to be
freed after resizing buffer pool */
@@ -1880,7 +1984,7 @@ struct buf_pool_t
eviction. Set to TRUE whenever
we flush a batch from the
buffer pool. Protected by the
- buf_pool->mutex */
+ buf_pool.mutex */
/* @} */
/** @name LRU replacement algorithm fields */
@@ -1953,49 +2057,64 @@ struct buf_pool_t
buf_page_t* watch;
/*!< Sentinel records for buffer
pool watches. Protected by
- buf_pool->mutex. */
-
- /** Temporary memory for page_compressed and encrypted I/O */
- struct io_buf_t {
- /** number of elements in slots[] */
- const ulint n_slots;
- /** array of slots */
- buf_tmp_buffer_t* const slots;
-
- io_buf_t() = delete;
-
- /** Constructor */
- explicit io_buf_t(ulint n_slots) :
- n_slots(n_slots),
- slots(static_cast<buf_tmp_buffer_t*>(
- ut_malloc_nokey(n_slots
- * sizeof *slots)))
- {
- memset((void*) slots, 0, n_slots * sizeof *slots);
- }
+ buf_pool.mutex. */
- ~io_buf_t()
- {
- for (buf_tmp_buffer_t *s= slots, *e= slots + n_slots;
- s != e; s++) {
- aligned_free(s->crypt_buf);
- aligned_free(s->comp_buf);
- }
- ut_free(slots);
- }
- /** Reserve a buffer */
- buf_tmp_buffer_t* reserve()
- {
- for (buf_tmp_buffer_t* s = slots, *e = slots + n_slots;
- s != e; s++) {
- if (s->acquire()) return s;
- }
- return NULL;
- }
- } io_buf;
+ /** Reserve a buffer. */
+ buf_tmp_buffer_t *io_buf_reserve() { return io_buf.reserve(); }
+private:
+ /** Temporary memory for page_compressed and encrypted I/O */
+ struct io_buf_t
+ {
+ /** number of elements in slots[] */
+ ulint n_slots;
+ /** array of slots */
+ buf_tmp_buffer_t *slots;
+
+ void create(ulint n_slots)
+ {
+ this->n_slots= n_slots;
+ slots= static_cast<buf_tmp_buffer_t*>
+ (ut_malloc_nokey(n_slots * sizeof *slots));
+ memset((void*) slots, 0, n_slots * sizeof *slots);
+ }
+
+ void close()
+ {
+ for (buf_tmp_buffer_t *s= slots, *e= slots + n_slots; s != e; s++)
+ {
+ aligned_free(s->crypt_buf);
+ aligned_free(s->comp_buf);
+ }
+ ut_free(slots);
+ slots= nullptr;
+ n_slots= 0;
+ }
+
+ /** Reserve a buffer */
+ buf_tmp_buffer_t *reserve()
+ {
+ for (buf_tmp_buffer_t *s= slots, *e= slots + n_slots; s != e; s++)
+ if (s->acquire())
+ return s;
+ return nullptr;
+ }
+ } io_buf;
+
+ /** whether resize() is in the critical path */
+ std::atomic<bool> resizing;
+
+ /** whether withdrawing buffer pool pages might cause page relocation */
+ std::atomic<bool> withdrawing;
+
+ /** a counter that is incremented every time a pointer to a page may
+ become obsolete */
+ std::atomic<ulint> withdraw_clock_;
};
+/** The InnoDB buffer pool */
+extern buf_pool_t buf_pool;
+
/** @name Accessors for buffer pool mutexes
Use these instead of accessing buffer pool mutexes directly. */
/* @{ */
@@ -2019,15 +2138,15 @@ UNIV_INLINE
rw_lock_t*
buf_page_hash_lock_get(const page_id_t& page_id)
{
- return hash_get_lock(buf_pool->page_hash, page_id.fold());
+ return hash_get_lock(buf_pool.page_hash, page_id.fold());
}
/** If not appropriate page_hash_lock, relock until appropriate. */
# define buf_page_hash_lock_s_confirm(hash_lock, page_id)\
- hash_lock_s_confirm(hash_lock, buf_pool->page_hash, (page_id).fold())
+ hash_lock_s_confirm(hash_lock, buf_pool.page_hash, (page_id).fold())
# define buf_page_hash_lock_x_confirm(hash_lock, page_id)\
- hash_lock_x_confirm(hash_lock, buf_pool->page_hash, (page_id).fold())
+ hash_lock_x_confirm(hash_lock, buf_pool.page_hash, (page_id).fold())
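Because page_hash itself can be replaced while the buffer pool is being resized, a lock obtained from buf_page_hash_lock_get() must be re-validated after it has been acquired; that is what the *_confirm macros are for. A minimal sketch of the pattern, mirroring buf_page_hash_get_locked() further down in this file's diff:

  rw_lock_t *hash_lock= buf_page_hash_lock_get(page_id);
  rw_lock_s_lock(hash_lock);
  /* a concurrent resize() may have swapped buf_pool.page_hash;
     re-resolve the lock against the current hash table */
  hash_lock= buf_page_hash_lock_s_confirm(hash_lock, page_id);
  buf_page_t *bpage= buf_page_hash_get_low(page_id);
  /* ... use bpage ... */
  rw_lock_s_unlock(hash_lock);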
#ifdef UNIV_DEBUG
/** Test if page_hash lock is held in s-mode. */
@@ -2063,13 +2182,13 @@ buf_page_hash_lock_get(const page_id_t& page_id)
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/** Forbid the release of the buffer pool mutex. */
# define buf_pool_mutex_exit_forbid() do { \
- ut_ad(mutex_own(&buf_pool->mutex)); \
- buf_pool->mutex_exit_forbidden++; \
+ ut_ad(mutex_own(&buf_pool.mutex)); \
+ buf_pool.mutex_exit_forbidden++; \
} while (0)
/** Allow the release of the buffer pool mutex. */
# define buf_pool_mutex_exit_allow() do { \
- ut_ad(mutex_own(&buf_pool->mutex)); \
- ut_ad(buf_pool->mutex_exit_forbidden--); \
+ ut_ad(mutex_own(&buf_pool.mutex)); \
+ ut_ad(buf_pool.mutex_exit_forbidden--); \
} while (0)
#else
/** Forbid the release of the buffer pool mutex. */
@@ -2127,15 +2246,14 @@ FILE_PAGE => NOT_USED NOTE: This transition is allowed if and only if
too deep into the LRU list it resets the value to the tail
of the LRU list.
@return buf_page_t from where to start scan. */
-inline buf_page_t* LRUItr::start()
+inline buf_page_t *LRUItr::start()
{
- ut_ad(mutex_own(m_mutex));
+ ut_ad(mutex_own(m_mutex));
- if (!m_hp || m_hp->old) {
- m_hp = UT_LIST_GET_LAST(buf_pool->LRU);
- }
+ if (!m_hp || m_hp->old)
+ m_hp= UT_LIST_GET_LAST(buf_pool.LRU);
- return(m_hp);
+ return m_hp;
}
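start() intentionally falls back to the tail of the LRU list whenever the saved position has drifted into the old sublist, so two threads can share one iterator and resume each other's scans. A rough sketch of the intended call pattern, assuming an LRUItr member such as the existing buf_pool.lru_scan_itr and that buf_pool.mutex is held around the iterator calls:

  ut_ad(mutex_own(&buf_pool.mutex));
  for (buf_page_t *bpage= buf_pool.lru_scan_itr.start(); bpage;
       bpage= buf_pool.lru_scan_itr.get())
  {
    buf_pool.lru_scan_itr.set(UT_LIST_GET_PREV(LRU, bpage)); // where to resume
    /* ... try to evict bpage; this may release and re-acquire
       buf_pool.mutex, during which another thread may adjust the
       hazard pointer if it removes the remembered page ... */
  }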
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@@ -2148,7 +2266,7 @@ struct CheckInLRUList {
static void validate()
{
- ut_list_validate(buf_pool->LRU, CheckInLRUList());
+ ut_list_validate(buf_pool.LRU, CheckInLRUList());
}
};
@@ -2161,7 +2279,7 @@ struct CheckInFreeList {
static void validate()
{
- ut_list_validate(buf_pool->free, CheckInFreeList());
+ ut_list_validate(buf_pool.free, CheckInFreeList());
}
};
@@ -2174,7 +2292,7 @@ struct CheckUnzipLRUAndLRUList {
static void validate()
{
- ut_list_validate(buf_pool->unzip_LRU,
+ ut_list_validate(buf_pool.unzip_LRU,
CheckUnzipLRUAndLRUList());
}
};
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index 55fe7d74760..7b74705d5c7 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -37,23 +37,6 @@ Created 11/5/1995 Heikki Tuuri
#include "buf0rea.h"
#include "fsp0types.h"
-/** A chunk of buffers. The buffer pool is allocated in chunks. */
-struct buf_chunk_t{
- ulint size; /*!< size of frames[] and blocks[] */
- unsigned char* mem; /*!< pointer to the memory area which
- was allocated for the frames */
- ut_new_pfx_t mem_pfx; /*!< Auxiliary structure, describing
- "mem". It is filled by the allocator's
- alloc method and later passed to the
- deallocate method. */
- buf_block_t* blocks; /*!< array of buffer control blocks */
-
- /** Get the size of 'mem' in bytes. */
- size_t mem_size() const {
- return(mem_pfx.m_size);
- }
-};
-
/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */
@@ -65,20 +48,6 @@ buf_pool_get_curr_size(void)
return(srv_buf_pool_curr_size);
}
-/*********************************************************************//**
-Gets the current size of buffer buf_pool in pages.
-@return size in pages*/
-inline ulint buf_pool_get_n_pages()
-{
- if (!buf_pool)
- return srv_buf_pool_curr_size >> srv_page_size_shift;
-
- ulint chunk_size= 0;
- for (uint j= 0; j < buf_pool->n_chunks; j++)
- chunk_size+= buf_pool->chunks[j].size;
- return chunk_size;
-}
-
/********************************************************************//**
Reads the freed_page_clock of a buffer block.
@return freed_page_clock */
@@ -88,7 +57,7 @@ buf_page_get_freed_page_clock(
/*==========================*/
const buf_page_t* bpage) /*!< in: block */
{
- /* This is sometimes read without holding buf_pool->mutex. */
+ /* This is sometimes read without holding buf_pool.mutex. */
return(bpage->freed_page_clock);
}
@@ -113,10 +82,10 @@ The page must be either buffer-fixed, or its page hash must be locked.
inline bool buf_page_peek_if_young(const buf_page_t *bpage)
{
/* FIXME: bpage->freed_page_clock is 31 bits */
- return((buf_pool->freed_page_clock & ((1UL << 31) - 1))
+ return((buf_pool.freed_page_clock & ((1UL << 31) - 1))
< (bpage->freed_page_clock
- + (buf_pool->curr_size
- * (BUF_LRU_OLD_RATIO_DIV - buf_pool->LRU_old_ratio)
+ + (buf_pool.curr_size
+ * (BUF_LRU_OLD_RATIO_DIV - buf_pool.LRU_old_ratio)
/ (BUF_LRU_OLD_RATIO_DIV * 4))));
}
@@ -126,7 +95,7 @@ there is danger of dropping from the buffer pool.
@return true if bpage should be made younger */
inline bool buf_page_peek_if_too_old(const buf_page_t *bpage)
{
- if (buf_pool->freed_page_clock == 0) {
+ if (buf_pool.freed_page_clock == 0) {
/* If eviction has not started yet, do not update the
statistics or move blocks in the LRU list. This is
either the warm-up phase or an in-memory workload. */
@@ -146,7 +115,7 @@ inline bool buf_page_peek_if_too_old(const buf_page_t *bpage)
return(TRUE);
}
- buf_pool->stat.n_pages_not_made_young++;
+ buf_pool.stat.n_pages_not_made_young++;
return false;
} else {
return !buf_page_peek_if_young(bpage);
@@ -315,7 +284,7 @@ buf_page_get_mutex(
return(NULL);
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
- return(&buf_pool->zip_mutex);
+ return(&buf_pool.zip_mutex);
default:
return(&((buf_block_t*) bpage)->mutex);
}
@@ -418,7 +387,7 @@ buf_page_set_io_fix(
buf_page_t* bpage, /*!< in/out: control block */
enum buf_io_fix io_fix) /*!< in: io_fix state */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
bpage->io_fix = io_fix;
@@ -439,7 +408,7 @@ buf_block_set_io_fix(
/*********************************************************************//**
Makes a block sticky. A sticky block implies that even after we release
-the buf_pool->mutex and the block->mutex:
+the buf_pool.mutex and the block->mutex:
* it cannot be removed from the flush_list
* the block descriptor cannot be relocated
* it cannot be removed from the LRU list
@@ -452,7 +421,7 @@ buf_page_set_sticky(
/*================*/
buf_page_t* bpage) /*!< in/out: control block */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
@@ -467,7 +436,7 @@ buf_page_unset_sticky(
/*==================*/
buf_page_t* bpage) /*!< in/out: control block */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_PIN);
@@ -483,7 +452,7 @@ buf_page_can_relocate(
/*==================*/
const buf_page_t* bpage) /*!< control block being relocated */
{
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
@@ -505,7 +474,7 @@ buf_page_is_old(
purposes even if LRU mutex is not being held. Keep the assertion
for not since all the callers hold it. */
ut_ad(mutex_own(buf_page_get_mutex(bpage))
- || mutex_own(&buf_pool->mutex));
+ || mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
return(bpage->old);
@@ -521,13 +490,13 @@ buf_page_set_old(
bool old) /*!< in: old */
{
ut_a(buf_page_in_file(bpage));
- ut_ad(mutex_own(&buf_pool->mutex));
+ ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(bpage->in_LRU_list);
#ifdef UNIV_LRU_DEBUG
- ut_a((buf_pool->LRU_old_len == 0) == (buf_pool->LRU_old == NULL));
+ ut_a((buf_pool.LRU_old_len == 0) == (buf_pool.LRU_old == NULL));
/* If a block is flagged "old", the LRU_old list must exist. */
- ut_a(!old || buf_pool->LRU_old);
+ ut_a(!old || buf_pool.LRU_old);
if (UT_LIST_GET_PREV(LRU, bpage) && UT_LIST_GET_NEXT(LRU, bpage)) {
const buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
@@ -536,7 +505,7 @@ buf_page_set_old(
ut_a(prev->old == old);
} else {
ut_a(!prev->old);
- ut_a(buf_pool->LRU_old == (old ? bpage : next));
+ ut_a(buf_pool.LRU_old == (old ? bpage : next));
}
}
#endif /* UNIV_LRU_DEBUG */
@@ -566,7 +535,7 @@ buf_page_set_accessed(
/*==================*/
buf_page_t* bpage) /*!< in/out: control block */
{
- ut_ad(!mutex_own(&buf_pool->mutex));
+ ut_ad(!mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_a(buf_page_in_file(bpage));
@@ -588,7 +557,7 @@ buf_page_get_block(
{
if (bpage != NULL) {
ut_ad(buf_page_hash_lock_held_s_or_x(bpage)
- || mutex_own(&buf_pool->mutex));
+ || mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
@@ -707,7 +676,7 @@ buf_block_free(
/*===========*/
buf_block_t* block) /*!< in, own: block to be freed */
{
- mutex_enter(&buf_pool->mutex);
+ mutex_enter(&buf_pool.mutex);
buf_page_mutex_enter(block);
@@ -717,7 +686,7 @@ buf_block_free(
buf_page_mutex_exit(block);
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
}
/********************************************************************//**
@@ -732,7 +701,7 @@ buf_block_modify_clock_inc(
{
/* No latch is acquired for the shared temporary tablespace. */
ut_ad(fsp_is_system_temporary(block->page.id.space())
- || (mutex_own(&buf_pool->mutex)
+ || (mutex_own(&buf_pool.mutex)
&& block->page.buf_fix_count == 0)
|| rw_lock_own_flagged(&block->lock,
RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));
@@ -819,14 +788,14 @@ inline buf_page_t *buf_page_hash_get_low(page_id_t page_id)
#ifdef UNIV_DEBUG
rw_lock_t* hash_lock;
- hash_lock = hash_get_lock(buf_pool->page_hash, page_id.fold());
+ hash_lock = hash_get_lock(buf_pool.page_hash, page_id.fold());
ut_ad(rw_lock_own(hash_lock, RW_LOCK_X)
|| rw_lock_own(hash_lock, RW_LOCK_S));
#endif /* UNIV_DEBUG */
/* Look for the page in the hash table */
- HASH_SEARCH(hash, buf_pool->page_hash, page_id.fold(), buf_page_t*,
+ HASH_SEARCH(hash, buf_pool.page_hash, page_id.fold(), buf_page_t*,
bpage,
ut_ad(bpage->in_page_hash && !bpage->in_zip_hash
&& buf_page_in_file(bpage)),
@@ -874,7 +843,7 @@ buf_page_hash_get_locked(
mode = lock_mode;
}
- hash_lock = hash_get_lock(buf_pool->page_hash, page_id.fold());
+ hash_lock = hash_get_lock(buf_pool.page_hash, page_id.fold());
ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)
&& !rw_lock_own(hash_lock, RW_LOCK_S));
@@ -884,12 +853,12 @@ buf_page_hash_get_locked(
/* If not own buf_pool_mutex, page_hash can be changed. */
hash_lock = hash_lock_s_confirm(
- hash_lock, buf_pool->page_hash, page_id.fold());
+ hash_lock, buf_pool.page_hash, page_id.fold());
} else {
rw_lock_x_lock(hash_lock);
/* If not own buf_pool_mutex, page_hash can be changed. */
hash_lock = hash_lock_x_confirm(
- hash_lock, buf_pool->page_hash, page_id.fold());
+ hash_lock, buf_pool.page_hash, page_id.fold());
}
bpage = buf_page_hash_get_low(page_id);
@@ -1078,18 +1047,6 @@ buf_page_get_frame(
}
}
-/** Verify the possibility that a stored page is not in buffer pool.
-@param[in] withdraw_clock withdraw clock when stored the page
-@retval true if the page might be relocated */
-UNIV_INLINE
-bool
-buf_pool_is_obsolete(
- ulint withdraw_clock)
-{
- return(UNIV_UNLIKELY(buf_pool_withdrawing
- || buf_withdraw_clock != withdraw_clock));
-}
-
/** Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit,
if needed.
@param[in] size size in bytes
diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h
index 3a41e0dd664..9ed42f4eed0 100644
--- a/storage/innobase/include/buf0lru.h
+++ b/storage/innobase/include/buf0lru.h
@@ -64,7 +64,7 @@ void buf_LRU_flush_or_remove_pages(ulint id, bool flush, ulint first = 0);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/********************************************************************//**
-Insert a compressed block into buf_pool->zip_clean in the LRU order. */
+Insert a compressed block into buf_pool.zip_clean in the LRU order. */
void
buf_LRU_insert_zip_clean(
/*=====================*/
@@ -76,10 +76,10 @@ Try to free a block. If bpage is a descriptor of a compressed-only
page, the descriptor object will be freed as well.
NOTE: If this function returns true, it will temporarily
-release buf_pool->mutex. Furthermore, the page frame will no longer be
+release buf_pool.mutex. Furthermore, the page frame will no longer be
accessible via bpage.
-The caller must hold buf_pool->mutex and must not hold any
+The caller must hold buf_pool.mutex and must not hold any
buf_page_get_mutex() when calling this function.
@return true if freed, false otherwise. */
bool
@@ -96,7 +96,7 @@ buf_LRU_free_page(
@return true if found and freed */
bool buf_LRU_scan_and_free_block(bool scan_all);
-/** @return a buffer block from the buf_pool->free list
+/** @return a buffer block from the buf_pool.free list
@retval NULL if the free list is empty */
buf_block_t* buf_LRU_get_free_only();
@@ -109,7 +109,7 @@ the free list. Even when we flush a page or find a page in LRU scan
we put it to free list to be used.
* iteration 0:
* get a block from free list, success:done
- * if buf_pool->try_LRU_scan is set
+ * if buf_pool.try_LRU_scan is set
* scan LRU up to srv_LRU_scan_depth to find a clean block
* the above will put the block on free list
* success:retry the free list
@@ -119,7 +119,7 @@ we put it to free list to be used.
* iteration 1:
* same as iteration 0 except:
* scan whole LRU list
- * scan LRU list even if buf_pool->try_LRU_scan is not set
+ * scan LRU list even if buf_pool.try_LRU_scan is not set
* iteration > 1:
* same as iteration 1 but sleep 10ms
@return the free control block, in state BUF_BLOCK_READY_FOR_USE */
@@ -159,11 +159,11 @@ Moves a block to the start of the LRU list. */
void
buf_LRU_make_block_young(buf_page_t* bpage);
-/** Update buf_pool->LRU_old_ratio.
+/** Update buf_pool.LRU_old_ratio.
@param[in] old_pct Reserve this percentage of
the buffer pool for "old" blocks
@param[in] adjust true=adjust the LRU list;
- false=just assign buf_pool->LRU_old_ratio
+ false=just assign buf_pool.LRU_old_ratio
during the initialization of InnoDB
@return updated old_pct */
uint buf_LRU_old_ratio_update(uint old_pct, bool adjust);
@@ -195,15 +195,15 @@ void buf_LRU_print();
#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
/** @name Heuristics for detecting index scan @{ */
-/** The denominator of buf_pool->LRU_old_ratio. */
+/** The denominator of buf_pool.LRU_old_ratio. */
#define BUF_LRU_OLD_RATIO_DIV 1024
-/** Maximum value of buf_pool->LRU_old_ratio.
+/** Maximum value of buf_pool.LRU_old_ratio.
@see buf_LRU_old_adjust_len
-@see buf_pool->LRU_old_ratio_update */
+@see buf_pool.LRU_old_ratio_update */
#define BUF_LRU_OLD_RATIO_MAX BUF_LRU_OLD_RATIO_DIV
-/** Minimum value of buf_pool->LRU_old_ratio.
+/** Minimum value of buf_pool.LRU_old_ratio.
@see buf_LRU_old_adjust_len
-@see buf_pool->LRU_old_ratio_update
+@see buf_pool.LRU_old_ratio_update
The minimum must exceed
(BUF_LRU_OLD_TOLERANCE + 5) * BUF_LRU_OLD_RATIO_DIV / BUF_LRU_OLD_MIN_LEN. */
#define BUF_LRU_OLD_RATIO_MIN 51
@@ -224,7 +224,7 @@ extern uint buf_LRU_old_threshold_ms;
These statistics are not 'of' LRU but 'for' LRU. We keep count of I/O
and page_zip_decompress() operations. Based on the statistics we decide
-if we want to evict from buf_pool->unzip_LRU or buf_pool->LRU. */
+if we want to evict from buf_pool.unzip_LRU or buf_pool.LRU. */
struct buf_LRU_stat_t
{
ulint io; /**< Counter of buffer pool I/O operations. */
@@ -236,7 +236,7 @@ Cleared by buf_LRU_stat_update(). */
extern buf_LRU_stat_t buf_LRU_stat_cur;
/** Running sum of past values of buf_LRU_stat_cur.
-Updated by buf_LRU_stat_update(). Protected by buf_pool->mutex. */
+Updated by buf_LRU_stat_update(). Protected by buf_pool.mutex. */
extern buf_LRU_stat_t buf_LRU_stat_sum;
/********************************************************************//**
diff --git a/storage/innobase/include/buf0types.h b/storage/innobase/include/buf0types.h
index 8570b87b67a..124a7f1854f 100644
--- a/storage/innobase/include/buf0types.h
+++ b/storage/innobase/include/buf0types.h
@@ -34,10 +34,6 @@ Created 11/17/1995 Heikki Tuuri
class buf_page_t;
/** Buffer block for which an uncompressed page exists */
struct buf_block_t;
-/** Buffer pool chunk comprising buf_block_t */
-struct buf_chunk_t;
-/** Buffer pool comprising buf_chunk_t */
-struct buf_pool_t;
/** Buffer pool statistics struct */
struct buf_pool_stat_t;
/** Buffer pool buddy statistics struct */
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index 06785c2a3b7..e05c2a586e3 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -352,7 +352,7 @@ extern const ulint srv_buf_pool_min_size;
extern const ulint srv_buf_pool_def_size;
/** Requested buffer pool chunk size */
extern ulong srv_buf_pool_chunk_unit;
-/** Number of locks to protect buf_pool->page_hash */
+/** Number of locks to protect buf_pool.page_hash */
extern ulong srv_n_page_hash_locks;
/** Scan depth for LRU flush batch i.e.: number of blocks scanned*/
extern ulong srv_LRU_scan_depth;
@@ -829,7 +829,7 @@ struct export_var_t{
ulint innodb_buffer_pool_pages_made_not_young;
ulint innodb_buffer_pool_pages_made_young;
ulint innodb_buffer_pool_pages_old;
- ulint innodb_buffer_pool_read_requests; /*!< buf_pool->stat.n_page_gets */
+ ulint innodb_buffer_pool_read_requests; /*!< buf_pool.stat.n_page_gets */
ulint innodb_buffer_pool_reads; /*!< srv_buf_pool_reads */
ulint innodb_buffer_pool_wait_free; /*!< srv_buf_pool_wait_free */
ulint innodb_buffer_pool_pages_flushed; /*!< srv_buf_pool_flushed */
@@ -866,9 +866,9 @@ struct export_var_t{
ulint innodb_os_log_fsyncs; /*!< n_log_flushes */
ulint innodb_os_log_pending_writes; /*!< srv_os_log_pending_writes */
ulint innodb_os_log_pending_fsyncs; /*!< n_pending_log_flushes */
- ulint innodb_pages_created; /*!< buf_pool->stat.n_pages_created */
- ulint innodb_pages_read; /*!< buf_pool->stat.n_pages_read*/
- ulint innodb_pages_written; /*!< buf_pool->stat.n_pages_written */
+ ulint innodb_pages_created; /*!< buf_pool.stat.n_pages_created */
+ ulint innodb_pages_read; /*!< buf_pool.stat.n_pages_read*/
+ ulint innodb_pages_written; /*!< buf_pool.stat.n_pages_written */
ulint innodb_row_lock_waits; /*!< srv_n_lock_wait_count */
ulint innodb_row_lock_current_waits; /*!< srv_n_lock_wait_current_count */
int64_t innodb_row_lock_time; /*!< srv_n_lock_wait_time
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 17c9a89cd0b..401de967808 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -516,8 +516,8 @@ void lock_sys_t::resize(ulint n_cells)
hash_table_free(old_hash);
/* need to update block->lock_hash_val */
- mutex_enter(&buf_pool->mutex);
- for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+ mutex_enter(&buf_pool.mutex);
+ for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
bpage; bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
buf_block_t* block = reinterpret_cast<buf_block_t*>(
@@ -527,7 +527,7 @@ void lock_sys_t::resize(ulint n_cells)
bpage->id.space(), bpage->id.page_no());
}
}
- mutex_exit(&buf_pool->mutex);
+ mutex_exit(&buf_pool.mutex);
mutex_exit(&mutex);
}
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index c8b02b63cf6..d6bdcee2a92 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -1680,7 +1680,7 @@ wait_suspend_loop:
}
}
- if (!buf_pool) {
+ if (!buf_pool.is_initialised()) {
ut_ad(!srv_was_started);
} else if (ulint pending_io = buf_pool_check_no_pending_io()) {
if (srv_print_verbose_log && count > 600) {
@@ -1754,7 +1754,7 @@ wait_suspend_loop:
service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL,
"Free innodb buffer pool");
- ut_d(buf_assert_all_freed());
+ ut_d(buf_pool.assert_all_freed());
ut_a(lsn == log_sys.get_lsn()
|| srv_force_recovery == SRV_FORCE_NO_LOG_REDO);
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index 7cd0ff1414c..098bc987252 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -1008,7 +1008,13 @@ void recv_sys_t::create()
apply_log_recs = false;
apply_batch_on = false;
- max_log_blocks = buf_pool_get_n_pages() / 3;
+ if (buf_pool.is_initialised()) {
+ max_log_blocks = buf_pool.get_n_pages() / 3;
+ } else {
+ ut_ad(srv_operation == SRV_OPERATION_BACKUP
+ || srv_operation == SRV_OPERATION_RESTORE_DELTA);
+ max_log_blocks = 0;
+ }
buf = static_cast<byte*>(ut_malloc_dontdump(RECV_PARSING_BUF_SIZE, PSI_INSTRUMENT_ME));
len = 0;
parse_start_lsn = 0;
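For scale, max_log_blocks is one third of the buffer pool size in pages: with the default 128 MiB pool and 16 KiB pages, buf_pool.get_n_pages() yields 8192 and max_log_blocks becomes 2730, while the backup and restore-delta paths that run without a buffer pool now get 0 (a worked example of the expression above, not a value fixed by this commit).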
@@ -1118,17 +1124,12 @@ inline void recv_sys_t::free(const void *data)
data= page_align(data);
ut_ad(mutex_own(&mutex));
-#ifdef UNIV_DEBUG
- /* MDEV-14481 FIXME: To prevent race condition with buf_pool_resize(),
+ /* MDEV-14481 FIXME: To prevent race condition with buf_pool.resize(),
we must acquire and hold the buffer pool mutex here. */
- extern volatile bool buf_pool_resizing;
- extern volatile bool buf_pool_withdrawing;
- ut_ad(!buf_pool_resizing);
- ut_ad(!buf_pool_withdrawing);
-#endif
+ ut_ad(!buf_pool.resize_in_progress());
- buf_chunk_t *chunk= buf_pool->chunks;
- for (auto i= buf_pool->n_chunks; i--; chunk++)
+ auto *chunk= buf_pool.chunks;
+ for (auto i= buf_pool.n_chunks; i--; chunk++)
{
if (data < chunk->blocks->frame)
continue;
@@ -3270,10 +3271,10 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
ut_ad(srv_operation == SRV_OPERATION_NORMAL
|| srv_operation == SRV_OPERATION_RESTORE
|| srv_operation == SRV_OPERATION_RESTORE_EXPORT);
- ut_d(mutex_enter(&buf_pool->flush_list_mutex));
- ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
- ut_ad(UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0);
- ut_d(mutex_exit(&buf_pool->flush_list_mutex));
+ ut_d(mutex_enter(&buf_pool.flush_list_mutex));
+ ut_ad(UT_LIST_GET_LEN(buf_pool.LRU) == 0);
+ ut_ad(UT_LIST_GET_LEN(buf_pool.unzip_LRU) == 0);
+ ut_d(mutex_exit(&buf_pool.flush_list_mutex));
/* Initialize red-black tree for fast insertions into the
flush_list during recovery process. */
diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc
index f34680b9aed..017c5da0353 100644
--- a/storage/innobase/srv/srv0mon.cc
+++ b/storage/innobase/srv/srv0mon.cc
@@ -1631,7 +1631,7 @@ srv_mon_process_existing_counter(
/* innodb_buffer_pool_read_requests, the number of logical
read requests */
case MONITOR_OVLD_BUF_POOL_READ_REQUESTS:
- value = buf_pool->stat.n_page_gets;
+ value = buf_pool.stat.n_page_gets;
break;
/* innodb_buffer_pool_write_requests, the number of
@@ -1647,61 +1647,61 @@ srv_mon_process_existing_counter(
/* innodb_buffer_pool_read_ahead */
case MONITOR_OVLD_BUF_POOL_READ_AHEAD:
- value = buf_pool->stat.n_ra_pages_read;
+ value = buf_pool.stat.n_ra_pages_read;
break;
/* innodb_buffer_pool_read_ahead_evicted */
case MONITOR_OVLD_BUF_POOL_READ_AHEAD_EVICTED:
- value = buf_pool->stat.n_ra_pages_evicted;
+ value = buf_pool.stat.n_ra_pages_evicted;
break;
/* innodb_buffer_pool_pages_total */
case MONITOR_OVLD_BUF_POOL_PAGE_TOTAL:
- value = buf_pool_get_n_pages();
+ value = buf_pool.get_n_pages();
break;
/* innodb_buffer_pool_pages_misc */
case MONITOR_OVLD_BUF_POOL_PAGE_MISC:
- value = buf_pool_get_n_pages()
- - UT_LIST_GET_LEN(buf_pool->LRU)
- - UT_LIST_GET_LEN(buf_pool->free);
+ value = buf_pool.get_n_pages()
+ - UT_LIST_GET_LEN(buf_pool.LRU)
+ - UT_LIST_GET_LEN(buf_pool.free);
break;
/* innodb_buffer_pool_pages_data */
case MONITOR_OVLD_BUF_POOL_PAGES_DATA:
- value = UT_LIST_GET_LEN(buf_pool->LRU);
+ value = UT_LIST_GET_LEN(buf_pool.LRU);
break;
/* innodb_buffer_pool_bytes_data */
case MONITOR_OVLD_BUF_POOL_BYTES_DATA:
- value = buf_pool->stat.LRU_bytes
- + (UT_LIST_GET_LEN(buf_pool->unzip_LRU)
+ value = buf_pool.stat.LRU_bytes
+ + (UT_LIST_GET_LEN(buf_pool.unzip_LRU)
<< srv_page_size_shift);
break;
/* innodb_buffer_pool_pages_dirty */
case MONITOR_OVLD_BUF_POOL_PAGES_DIRTY:
- value = UT_LIST_GET_LEN(buf_pool->flush_list);
+ value = UT_LIST_GET_LEN(buf_pool.flush_list);
break;
/* innodb_buffer_pool_bytes_dirty */
case MONITOR_OVLD_BUF_POOL_BYTES_DIRTY:
- value = buf_pool->stat.flush_list_bytes;
+ value = buf_pool.stat.flush_list_bytes;
break;
/* innodb_buffer_pool_pages_free */
case MONITOR_OVLD_BUF_POOL_PAGES_FREE:
- value = UT_LIST_GET_LEN(buf_pool->free);
+ value = UT_LIST_GET_LEN(buf_pool.free);
break;
/* innodb_pages_created, the number of pages created */
case MONITOR_OVLD_PAGE_CREATED:
- value = buf_pool->stat.n_pages_created;
+ value = buf_pool.stat.n_pages_created;
break;
/* innodb_pages_written, the number of page written */
case MONITOR_OVLD_PAGES_WRITTEN:
- value = buf_pool->stat.n_pages_written;
+ value = buf_pool.stat.n_pages_written;
break;
/* innodb_index_pages_written, the number of index pages written */
@@ -1716,7 +1716,7 @@ srv_mon_process_existing_counter(
/* innodb_pages_read */
case MONITOR_OVLD_PAGES_READ:
- value = buf_pool->stat.n_pages_read;
+ value = buf_pool.stat.n_pages_read;
break;
/* Number of times secondary index lookup triggered cluster lookup */
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index 6b6506a8b01..1a0bfb39523 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -207,7 +207,7 @@ const ulint srv_buf_pool_def_size = 128 * 1024 * 1024;
/** Requested buffer pool chunk size */
ulong srv_buf_pool_chunk_unit;
/** innodb_page_hash_locks (a debug-only parameter);
-number of locks to protect buf_pool->page_hash */
+number of locks to protect buf_pool.page_hash */
ulong srv_n_page_hash_locks = 16;
/** innodb_lru_scan_depth; number of blocks scanned in LRU flush batch */
ulong srv_LRU_scan_depth;
@@ -972,7 +972,7 @@ srv_printf_innodb_monitor(
const hash_table_t* table = btr_search_sys->hash_tables[i];
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
- /* this is only used for buf_pool->page_hash */
+ /* this is only used for buf_pool.page_hash */
ut_ad(!table->heaps);
/* this is used for the adaptive hash index */
ut_ad(table->heap);
@@ -1173,7 +1173,7 @@ srv_export_innodb_status(void)
export_vars.innodb_data_written = srv_stats.data_written;
export_vars.innodb_buffer_pool_read_requests
- = buf_pool->stat.n_page_gets;
+ = buf_pool.stat.n_page_gets;
export_vars.innodb_buffer_pool_write_requests =
srv_stats.buf_pool_write_requests;
@@ -1187,48 +1187,48 @@ srv_export_innodb_status(void)
export_vars.innodb_buffer_pool_reads = srv_stats.buf_pool_reads;
export_vars.innodb_buffer_pool_read_ahead_rnd =
- buf_pool->stat.n_ra_pages_read_rnd;
+ buf_pool.stat.n_ra_pages_read_rnd;
export_vars.innodb_buffer_pool_read_ahead =
- buf_pool->stat.n_ra_pages_read;
+ buf_pool.stat.n_ra_pages_read;
export_vars.innodb_buffer_pool_read_ahead_evicted =
- buf_pool->stat.n_ra_pages_evicted;
+ buf_pool.stat.n_ra_pages_evicted;
export_vars.innodb_buffer_pool_pages_data =
- UT_LIST_GET_LEN(buf_pool->LRU);
+ UT_LIST_GET_LEN(buf_pool.LRU);
export_vars.innodb_buffer_pool_bytes_data =
- buf_pool->stat.LRU_bytes
- + (UT_LIST_GET_LEN(buf_pool->unzip_LRU)
+ buf_pool.stat.LRU_bytes
+ + (UT_LIST_GET_LEN(buf_pool.unzip_LRU)
<< srv_page_size_shift);
export_vars.innodb_buffer_pool_pages_dirty =
- UT_LIST_GET_LEN(buf_pool->flush_list);
+ UT_LIST_GET_LEN(buf_pool.flush_list);
export_vars.innodb_buffer_pool_pages_made_young
- = buf_pool->stat.n_pages_made_young;
+ = buf_pool.stat.n_pages_made_young;
export_vars.innodb_buffer_pool_pages_made_not_young
- = buf_pool->stat.n_pages_not_made_young;
+ = buf_pool.stat.n_pages_not_made_young;
- export_vars.innodb_buffer_pool_pages_old = buf_pool->LRU_old_len;
+ export_vars.innodb_buffer_pool_pages_old = buf_pool.LRU_old_len;
export_vars.innodb_buffer_pool_bytes_dirty =
- buf_pool->stat.flush_list_bytes;
+ buf_pool.stat.flush_list_bytes;
export_vars.innodb_buffer_pool_pages_free =
- UT_LIST_GET_LEN(buf_pool->free);
+ UT_LIST_GET_LEN(buf_pool.free);
#ifdef UNIV_DEBUG
export_vars.innodb_buffer_pool_pages_latched =
buf_get_latched_pages_number();
#endif /* UNIV_DEBUG */
- export_vars.innodb_buffer_pool_pages_total = buf_pool_get_n_pages();
+ export_vars.innodb_buffer_pool_pages_total = buf_pool.get_n_pages();
export_vars.innodb_buffer_pool_pages_misc =
- buf_pool_get_n_pages()
- - UT_LIST_GET_LEN(buf_pool->LRU)
- - UT_LIST_GET_LEN(buf_pool->free);
+ buf_pool.get_n_pages()
+ - UT_LIST_GET_LEN(buf_pool.LRU)
+ - UT_LIST_GET_LEN(buf_pool.free);
export_vars.innodb_max_trx_id = trx_sys.get_max_trx_id();
export_vars.innodb_history_list_length = trx_sys.rseg_history_len;
@@ -1254,11 +1254,11 @@ srv_export_innodb_status(void)
export_vars.innodb_dblwr_writes = srv_stats.dblwr_writes;
- export_vars.innodb_pages_created = buf_pool->stat.n_pages_created;
+ export_vars.innodb_pages_created = buf_pool.stat.n_pages_created;
- export_vars.innodb_pages_read = buf_pool->stat.n_pages_read;
+ export_vars.innodb_pages_read = buf_pool.stat.n_pages_read;
- export_vars.innodb_pages_written = buf_pool->stat.n_pages_written;
+ export_vars.innodb_pages_written = buf_pool.stat.n_pages_written;
export_vars.innodb_row_lock_waits = srv_stats.n_lock_wait_count;
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index c4bc50dd93a..ecfcc499004 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -1308,7 +1308,7 @@ dberr_t srv_start(bool create_new_db)
<< srv_buf_pool_size
<< ", chunk size = " << srv_buf_pool_chunk_unit;
- if (buf_pool_init()) {
+ if (buf_pool.create()) {
ib::error() << "Cannot allocate memory for the buffer pool";
return(srv_init_abort(DB_ERROR));
@@ -2172,11 +2172,8 @@ void innodb_shutdown()
pars_lexer_close();
recv_sys.close();
- ut_ad(buf_pool || !srv_was_started);
- if (buf_pool) {
- buf_pool_free();
- }
-
+ ut_ad(buf_pool.is_initialised() || !srv_was_started);
+ buf_pool.close();
sync_check_close();
if (srv_was_started && srv_print_verbose_log) {
diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc
index fc4bb262748..bc445bd4604 100644
--- a/storage/innobase/sync/sync0debug.cc
+++ b/storage/innobase/sync/sync0debug.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -828,16 +828,16 @@ LatchDebug::check_order(
case SYNC_BUF_PAGE_HASH:
/* Multiple page_hash locks are only allowed during
- buf_validate and that is where buf_pool mutex is already
+ buf_pool.validate() and that is where buf_pool mutex is already
held. */
/* Fall through */
case SYNC_BUF_BLOCK:
- /* Either the thread must own the (buffer pool) buf_pool->mutex
+ /* Either the thread must own the (buffer pool) buf_pool.mutex
or it is allowed to latch only ONE of (buffer block)
- block->mutex or buf_pool->zip_mutex. */
+ block->mutex or buf_pool.zip_mutex. */
if (less(latches, level) != NULL) {
basic_check(latches, level, level - 1);
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index f451659d2ca..88f3380c239 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -1897,7 +1897,8 @@ dberr_t trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
if (uint16_t offset = trx_undo_page_report_rename(
trx, table, block, &mtr)) {
- undo->withdraw_clock = buf_withdraw_clock;
+ undo->withdraw_clock
+ = buf_pool.withdraw_clock();
undo->top_page_no = undo->last_page_no;
undo->top_offset = offset;
undo->top_undo_no = trx->undo_no++;
@@ -2046,7 +2047,7 @@ trx_undo_report_row_operation(
mtr_commit(&mtr);
} else {
/* Success */
- undo->withdraw_clock = buf_withdraw_clock;
+ undo->withdraw_clock = buf_pool.withdraw_clock();
mtr_commit(&mtr);
undo->top_page_no = undo_block->page.id.page_no();
diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc
index 4b4d71611ed..56d71497027 100644
--- a/storage/innobase/trx/trx0undo.cc
+++ b/storage/innobase/trx/trx0undo.cc
@@ -1154,7 +1154,7 @@ trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
return buf_page_get_gen(
page_id_t(undo->rseg->space->id, undo->last_page_no),
0, RW_X_LATCH,
- buf_pool_is_obsolete(undo->withdraw_clock)
+ buf_pool.is_obsolete(undo->withdraw_clock)
? NULL : undo->guess_block,
BUF_GET, __FILE__, __LINE__, mtr, err);
}
@@ -1210,7 +1210,7 @@ trx_undo_assign_low(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
return buf_page_get_gen(
page_id_t(rseg->space->id, (*undo)->last_page_no),
0, RW_X_LATCH,
- buf_pool_is_obsolete((*undo)->withdraw_clock)
+ buf_pool.is_obsolete((*undo)->withdraw_clock)
? NULL : (*undo)->guess_block,
BUF_GET, __FILE__, __LINE__, mtr, err);
}