author    | Marko Mäkelä <marko.makela@mariadb.com> | 2020-03-17 11:24:59 +0200
committer | Marko Mäkelä <marko.makela@mariadb.com> | 2020-03-17 11:24:59 +0200
commit    | 7aaba7413c68f535fab0f3e78e9956e21fb20e67 (patch)
tree      | 8889af2851a3c7a98e4fb531ca12d6ba6b1261f2
parent    | 3c98a7926f842b780890bad33b89f389a6f7e2cc (diff)
download  | mariadb-git-bb-10.5-MDEV-15053-2.tar.gz
Rename buf_pool->mutex back from buf_pool->LRU_list_mutex (bb-10.5-MDEV-15053-2)
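The change is mechanical: every acquire, release, and ownership assertion that named buf_pool->LRU_list_mutex now names buf_pool->mutex, while the mutex keeps guarding the buf_pool->LRU list and buf_page_t::state. Below is a minimal standalone sketch of that guard pattern, with std::mutex and std::list standing in for InnoDB's BufListMutex and UT_LIST; the types and function here are simplifications for illustration, not the real definitions from buf0buf.h.

```cpp
// Hypothetical, simplified stand-in for the buffer pool's LRU guard.
#include <cstdio>
#include <list>
#include <mutex>

struct buf_pool_t {
	std::mutex mutex;       // was LRU_list_mutex before this commit
	std::list<int> LRU;     // least-recently-used page list (page ids)
};

static buf_pool_t pool;

// Mirrors the shape of buf_page_make_young() in the diff: take
// buf_pool->mutex, reposition the page at the head of the LRU list,
// release the mutex on scope exit.
void page_make_young(int page_id)
{
	std::lock_guard<std::mutex> g(pool.mutex);
	pool.LRU.remove(page_id);      // unlink from its current position
	pool.LRU.push_front(page_id);  // reinsert at the "young" end
}

int main()
{
	pool.LRU = {1, 2, 3};
	page_make_young(3);
	for (int id : pool.LRU) {
		std::printf("%d ", id);  // prints: 3 1 2
	}
	std::printf("\n");
}
```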
-rw-r--r-- | storage/innobase/btr/btr0cur.cc       |   4
-rw-r--r-- | storage/innobase/btr/btr0sea.cc       |   4
-rw-r--r-- | storage/innobase/buf/buf0buf.cc       | 116
-rw-r--r-- | storage/innobase/buf/buf0dblwr.cc     |   2
-rw-r--r-- | storage/innobase/buf/buf0dump.cc      |   8
-rw-r--r-- | storage/innobase/buf/buf0flu.cc       |  69
-rw-r--r-- | storage/innobase/buf/buf0lru.cc       | 106
-rw-r--r-- | storage/innobase/buf/buf0rea.cc       |   6
-rw-r--r-- | storage/innobase/handler/ha_innodb.cc |   6
-rw-r--r-- | storage/innobase/handler/i_s.cc       |   4
-rw-r--r-- | storage/innobase/include/buf0buf.h    |  24
-rw-r--r-- | storage/innobase/include/buf0buf.ic   |  16
-rw-r--r-- | storage/innobase/include/sync0sync.h  |   2
-rw-r--r-- | storage/innobase/lock/lock0lock.cc    |   4
-rw-r--r-- | storage/innobase/sync/sync0debug.cc   |   2
-rw-r--r-- | storage/innobase/sync/sync0sync.cc    |   2
16 files changed, 188 insertions, 187 deletions
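Most hunks below change one of three recurring forms: mutex_enter(&buf_pool->mutex), mutex_exit(&buf_pool->mutex), and the debug assertion ut_ad(mutex_own(&buf_pool->mutex)). As a rough sketch of what such an ownership assertion can check, here is a hypothetical OwnedMutex that records its owning thread; InnoDB's debug-instrumented mutex is more involved, so this only illustrates the idea behind mutex_own().

```cpp
#include <cassert>
#include <mutex>
#include <thread>

class OwnedMutex {
public:
	void enter() {
		m_mutex.lock();
		m_owner = std::this_thread::get_id();
	}
	void exit() {
		m_owner = std::thread::id();  // reset to "no owner"
		m_mutex.unlock();
	}
	// Counterpart of mutex_own(): true iff the calling thread holds
	// the mutex. Reading m_owner without the lock is acceptable here
	// only because a thread queries its own ownership.
	bool is_owned() const {
		return m_owner == std::this_thread::get_id();
	}
private:
	std::mutex m_mutex;
	std::thread::id m_owner;
};

static OwnedMutex buf_pool_mutex;

void lru_operation_requiring_latch()
{
	assert(buf_pool_mutex.is_owned());  // like ut_ad(mutex_own(...))
	// ... manipulate the LRU list here ...
}

int main()
{
	buf_pool_mutex.enter();
	lru_operation_requiring_latch();
	buf_pool_mutex.exit();
}
```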
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index d44a0283664..b69b43d0a4c 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -7062,7 +7062,7 @@ btr_blob_free(
 mtr_commit(mtr);
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 buf_page_mutex_enter(block);
 /* Only free the block if it is still allocated to
@@ -7085,7 +7085,7 @@ btr_blob_free(
 }
 if (!freed) {
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 buf_page_mutex_exit(block);
 }
 }
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index cf95f11e419..41d7be9e2a7 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -2023,7 +2023,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
 /* Prevent BUF_BLOCK_FILE_PAGE -> BUF_BLOCK_REMOVE_HASH
 transition until we lock the block mutex */
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 if (UNIV_LIKELY(buf_block_get_state(block)
 == BUF_BLOCK_FILE_PAGE)) {
@@ -2056,7 +2056,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
 }
 mutex_enter(&block->mutex);
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 ut_a(!dict_index_is_ibuf(block->index));
 ut_ad(block->page.id.space()
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 4f92b4d1dc6..367df60956d 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -130,7 +130,7 @@
 The buffer buf_pool contains several mutexes which protect all the
 control data structures of the buf_pool. The content of a buffer frame is
 protected by a separate read-write lock in its control block, though.
-buf_pool->LRU_list_mutex protects the LRU_list;
+buf_pool->mutex protects the buf_pool->LRU list and buf_page_t::state;
 buf_pool->free_list_mutex protects the free_list and withdraw list;
 buf_pool->flush_state_mutex protects the flush state related data structures;
 buf_pool->zip_free mutex protects the zip_free arrays;
@@ -1606,7 +1606,7 @@ bool buf_pool_init()
 /* 1. Initialize general fields ------------------------------- */
- mutex_create(LATCH_ID_BUF_POOL_LRU_LIST, &buf_pool->LRU_list_mutex);
+ mutex_create(LATCH_ID_BUF_POOL_LRU_LIST, &buf_pool->mutex);
 mutex_create(LATCH_ID_BUF_POOL_FREE_LIST, &buf_pool->free_list_mutex);
 mutex_create(LATCH_ID_BUF_POOL_ZIP_FREE, &buf_pool->zip_free_mutex);
 mutex_create(LATCH_ID_BUF_POOL_ZIP_HASH, &buf_pool->zip_hash_mutex);
@@ -1712,13 +1712,13 @@ bool buf_pool_init()
 new(&buf_pool->flush_hp) FlushHp(&buf_pool->flush_list_mutex);
 /* Initialize the hazard pointer for LRU batches */
- new(&buf_pool->lru_hp) LRUHp(&buf_pool->LRU_list_mutex);
+ new(&buf_pool->lru_hp) LRUHp(&buf_pool->mutex);
 /* Initialize the iterator for LRU scan search */
- new(&buf_pool->lru_scan_itr) LRUItr(&buf_pool->LRU_list_mutex);
+ new(&buf_pool->lru_scan_itr) LRUItr(&buf_pool->mutex);
 /* Initialize the iterator for single page scan search */
- new(&buf_pool->single_scan_itr) LRUItr(&buf_pool->LRU_list_mutex);
+ new(&buf_pool->single_scan_itr) LRUItr(&buf_pool->mutex);
 /* Initialize the temporal memory array and slots */
 new(&buf_pool->io_buf) buf_pool_t::io_buf_t(
@@ -1742,7 +1742,7 @@ void buf_pool_free()
 buf_page_t* bpage;
 buf_page_t* prev_bpage = 0;
- mutex_free(&buf_pool->LRU_list_mutex);
+ mutex_free(&buf_pool->mutex);
 mutex_free(&buf_pool->free_list_mutex);
 mutex_free(&buf_pool->zip_free_mutex);
 mutex_free(&buf_pool->zip_hash_mutex);
@@ -1818,7 +1818,7 @@ static bool buf_page_realloc(buf_block_t* block)
 buf_block_t* new_block;
 ut_ad(buf_pool_withdrawing);
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 new_block = buf_LRU_get_free_only();
@@ -2039,9 +2039,9 @@ static bool buf_pool_withdraw_blocks()
 /* Minimize buf_pool->zip_free[i] lists */
 buf_buddy_condense_free();
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 mutex_enter(&buf_pool->free_list_mutex);
 while (UT_LIST_GET_LEN(buf_pool->withdraw)
@@ -2112,7 +2112,7 @@ static bool buf_pool_withdraw_blocks()
 /* relocate blocks/buddies in withdrawn area */
 ulint count2 = 0;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 buf_page_t* bpage;
 bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
 while (bpage != NULL) {
@@ -2164,7 +2164,7 @@ static bool buf_pool_withdraw_blocks()
 bpage = next_bpage;
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 mutex_enter(&buf_pool->free_list_mutex);
@@ -2501,7 +2501,7 @@ withdraw_retry:
 have no pointers to them from the buffer pool nor from any other
 thread except for the freeing one to remove redundant locking. The
 same applies to freshly allocated pages before any pointers to them
 are published.*/
- mutex_enter(&(buf_pool->LRU_list_mutex));
+ mutex_enter(&(buf_pool->mutex));
 hash_lock_x_all(buf_pool->page_hash);
 mutex_enter(&(buf_pool->zip_free_mutex));
 mutex_enter(&(buf_pool->free_list_mutex));
@@ -2664,7 +2664,7 @@ calc_buf_pool_size:
 mutex_exit(&buf_pool->free_list_mutex);
 mutex_exit(&buf_pool->zip_free_mutex);
 hash_unlock_x_all(buf_pool->page_hash);
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 if (buf_pool->page_hash_old != NULL) {
 hash_table_free(buf_pool->page_hash_old);
@@ -2823,7 +2823,7 @@ buf_relocate(
 {
 buf_page_t* b;
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(buf_page_hash_lock_held_x(bpage));
 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
@@ -3147,13 +3147,13 @@ the buffer pool.
 @param[in,out] bpage buffer block of a file page */
 void buf_page_make_young(buf_page_t* bpage)
 {
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 ut_a(buf_page_in_file(bpage));
 buf_LRU_make_block_young(bpage);
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 }
 /** Mark the page status as FREED for the given tablespace id and
@@ -3222,13 +3222,13 @@ static void buf_block_try_discard_uncompressed(const page_id_t page_id)
 {
 buf_page_t* bpage;
- /* Since we need to acquire buf_pool->LRU_list_mutex to discard
+ /* Since we need to acquire buf_pool->mutex to discard
 the uncompressed frame and because page_hash mutex resides below
- buf_pool->LRU_list_mutex in sync ordering therefore we must first
+ buf_pool->mutex in sync ordering therefore we must first
 release the page_hash mutex. This means that the block in question
 can move out of page_hash. Therefore we need to check again if the
 block is still in page_hash. */
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 bpage = buf_page_hash_get(page_id);
@@ -3245,7 +3245,7 @@ static void buf_block_try_discard_uncompressed(const page_id_t page_id)
 mutex_exit(block_mutex);
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 }
 /** Get read access to a compressed page (usually of type
@@ -3939,7 +3939,7 @@ got_block:
 if (UNIV_UNLIKELY(mode == BUF_EVICT_IF_IN_POOL)) {
 evict_from_pool:
 ut_ad(!fix_block->page.oldest_modification);
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 fix_mutex = buf_page_get_mutex(
 &fix_block->page);
@@ -3951,7 +3951,7 @@ evict_from_pool:
 }
 // buf_LRU_free_page frees the mutexes we locked.
 ut_ad(!mutex_own(fix_mutex));
- ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(!mutex_own(&buf_pool->mutex));
 return(NULL);
 }
@@ -4003,7 +4003,7 @@ evict_from_pool:
 block = buf_LRU_get_free_block();
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 rw_lock_x_lock(hash_lock);
@@ -4027,7 +4027,7 @@ evict_from_pool:
 This should be extremely unlikely, for example,
 if buf_page_get_zip() was invoked. */
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 rw_lock_x_unlock(hash_lock);
 buf_page_mutex_exit(block);
 buf_LRU_block_free_non_file_page(block);
@@ -4073,7 +4073,7 @@ evict_from_pool:
 /* Insert at the front of unzip_LRU list */
 buf_unzip_LRU_add_block(block, FALSE);
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 buf_block_set_io_fix(block, BUF_IO_READ);
 rw_lock_x_lock_inline(&block->lock, 0, file, line);
@@ -4141,14 +4141,14 @@ evict_from_pool:
 /* Try to evict the block from the buffer pool, to use the
 insert buffer (change buffer) as much as possible. */
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 fix_block->unfix();
- /* Now we are only holding the buf_pool->LRU_list_mutex,
+ /* Now we are only holding the buf_pool->mutex,
 not block->mutex or hash_lock. Blocks cannot be
 relocated or enter or exit the buf_pool while we
- are holding the buf_pool->LRU_list_mutex. */
+ are holding the buf_pool->mutex. */
 fix_mutex = buf_page_get_mutex(&fix_block->page);
 mutex_enter(fix_mutex);
@@ -4158,7 +4158,7 @@ evict_from_pool:
 if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
 /* Hold LRU list mutex, see comment
 in buf_pool_watch_set(). */
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 }
 /* page_hash can be changed. */
@@ -4173,7 +4173,7 @@ evict_from_pool:
 buffer pool in the first place. */
 block = (buf_block_t*) buf_pool_watch_set(
 page_id, &hash_lock);
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 } else {
 block = (buf_block_t*) buf_page_hash_get_low(
 page_id);
@@ -4201,7 +4201,7 @@ evict_from_pool:
 goto loop;
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 buf_page_mutex_exit(fix_block);
@@ -4675,7 +4675,7 @@ buf_page_init_for_read(
 data = buf_buddy_alloc(zip_size);
 }
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 hash_lock = buf_page_hash_lock_get(page_id);
 rw_lock_x_lock(hash_lock);
@@ -4687,7 +4687,7 @@ buf_page_init_for_read(
 /* The page is already in the buffer pool. */
 watch_page = NULL;
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 rw_lock_x_unlock(hash_lock);
@@ -4736,7 +4736,7 @@ buf_page_init_for_read(
 buf_unzip_LRU_add_block(block, TRUE);
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 watch_page = buf_page_hash_get_low(page_id);
 /* We set a pass-type x-lock on the frame because then
 the same thread which called for the read operation
@@ -4802,7 +4802,7 @@ buf_page_init_for_read(
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 buf_LRU_insert_zip_clean(bpage);
 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 buf_page_set_io_fix(bpage, BUF_IO_READ);
@@ -4848,7 +4848,7 @@ buf_page_create(
 free_block = buf_LRU_get_free_block();
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 hash_lock = buf_page_hash_lock_get(page_id);
 rw_lock_x_lock(hash_lock);
@@ -4859,7 +4859,7 @@ buf_page_create(
 && buf_page_in_file(&block->page)
 && !buf_pool_watch_is_sentinel(&block->page)) {
 /* Page can be found in buf_pool */
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 rw_lock_x_unlock(hash_lock);
 buf_block_free(free_block);
@@ -4901,16 +4901,16 @@ buf_page_create(
 if (zip_size) {
 /* Prevent race conditions during buf_buddy_alloc(),
- which may release and reacquire buf_pool->LRU_list_mutex,
+ which may release and reacquire buf_pool->mutex,
 by IO-fixing and X-latching the block. */
 buf_page_set_io_fix(&block->page, BUF_IO_READ);
 rw_lock_x_lock(&block->lock);
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 buf_page_mutex_exit(block);
 block->page.zip.data = buf_buddy_alloc(zip_size);
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 buf_page_mutex_enter(block);
 /* To maintain the invariant
@@ -4925,7 +4925,7 @@ buf_page_create(
 rw_lock_x_unlock(&block->lock);
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
@@ -5101,7 +5101,7 @@ buf_corrupt_page_release(buf_page_t* bpage, const fil_space_t* space)
 rw_lock_t* hash_lock = buf_page_hash_lock_get(bpage->id);
 /* First unfix and release lock on the bpage */
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 rw_lock_x_lock(hash_lock);
@@ -5132,7 +5132,7 @@ buf_corrupt_page_release(buf_page_t* bpage, const fil_space_t* space)
 ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)
 && !rw_lock_own(hash_lock, RW_LOCK_S));
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 ut_ad(buf_pool->n_pend_reads > 0);
 buf_pool->n_pend_reads--;
@@ -5444,7 +5444,7 @@ release_page:
 }
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 BPageMutex* page_mutex = buf_page_get_mutex(bpage);
 mutex_enter(page_mutex);
@@ -5460,7 +5460,7 @@ release_page:
 have_LRU_mutex = true; /* optimistic */
 } else {
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 }
@@ -5521,7 +5521,7 @@ release_page:
 mutex_exit(buf_page_get_mutex(bpage));
 }
 if (have_LRU_mutex) {
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 }
 }
@@ -5534,7 +5534,7 @@ release_page:
 /** Assert that all buffer pool pages are in a replaceable state */
 void buf_assert_all_freed()
 {
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 buf_chunk_t* chunk = buf_pool->chunks;
 for (ulint i = buf_pool->n_chunks; i--; chunk++) {
@@ -5545,7 +5545,7 @@ void buf_assert_all_freed()
 }
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 }
 /** Refresh the statistics used to print per-second averages. */
@@ -5559,7 +5559,7 @@ void buf_refresh_io_stats()
 All pages must be in a replaceable state (not modified or latched). */
 void buf_pool_invalidate()
 {
- ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(!mutex_own(&buf_pool->mutex));
 mutex_enter(&buf_pool->flush_state_mutex);
@@ -5590,7 +5590,7 @@ void buf_pool_invalidate()
 while (buf_LRU_scan_and_free_block(true));
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
 ut_ad(UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0);
@@ -5599,7 +5599,7 @@ void buf_pool_invalidate()
 buf_pool->LRU_old = NULL;
 buf_pool->LRU_old_len = 0;
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 memset(&buf_pool->stat, 0x00, sizeof(buf_pool->stat));
 buf_refresh_io_stats();
@@ -5620,7 +5620,7 @@ void buf_validate()
 ulint n_free = 0;
 ulint n_zip = 0;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 hash_lock_x_all(buf_pool->page_hash);
 mutex_enter(&buf_pool->zip_mutex);
 mutex_enter(&buf_pool->free_list_mutex);
@@ -5785,7 +5785,7 @@ void buf_validate()
 ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == n_lru);
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 if (buf_pool->curr_size == buf_pool->old_size
 && UT_LIST_GET_LEN(buf_pool->free) > n_free) {
@@ -5826,7 +5826,7 @@ void buf_print()
 counts = static_cast<ulint*>(ut_malloc_nokey(sizeof(ulint) * size));
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 mutex_enter(&buf_pool->free_list_mutex);
 mutex_enter(&buf_pool->flush_state_mutex);
 mutex_enter(&buf_pool->flush_list_mutex);
@@ -5891,7 +5891,7 @@ void buf_print()
 }
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 for (i = 0; i < n_found; i++) {
 index = dict_index_get_if_in_cache(index_ids[i]);
@@ -5924,7 +5924,7 @@ ulint buf_get_latched_pages_number()
 buf_chunk_t* chunk;
 ulint fixed_pages_number = 0;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 chunk = buf_pool->chunks;
@@ -5949,7 +5949,7 @@ ulint buf_get_latched_pages_number()
 }
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 mutex_enter(&buf_pool->zip_mutex);
@@ -6302,7 +6302,7 @@ buf_page_get_trim_length(
 ulint write_length)
 {
 return bpage->physical_size() - write_length;
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(mutex_own(&buf_pool->free_list_mutex));
 ut_ad(mutex_own(&buf_pool->flush_state_mutex));
 ut_ad(mutex_own(&buf_pool->flush_list_mutex));
diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc
index 3e0a23e0b46..580ea7bdee6 100644
--- a/storage/innobase/buf/buf0dblwr.cc
+++ b/storage/innobase/buf/buf0dblwr.cc
@@ -1038,7 +1038,7 @@ buf_dblwr_add_to_batch(
 buf_page_t* bpage)
 {
 ut_a(buf_page_in_file(bpage));
- ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(!mutex_own(&buf_pool->mutex));
 try_again:
 mutex_enter(&buf_dblwr->mutex);
diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc
index e1f2931a546..f697df5801d 100644
--- a/storage/innobase/buf/buf0dump.cc
+++ b/storage/innobase/buf/buf0dump.cc
@@ -286,13 +286,13 @@ buf_dump(
 ulint n_pages;
 ulint j;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 n_pages = UT_LIST_GET_LEN(buf_pool->LRU);
 /* skip empty buffer pools */
 if (n_pages == 0) {
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 goto done;
 }
@@ -320,7 +320,7 @@ buf_dump(
 n_pages * sizeof(*dump)));
 if (dump == NULL) {
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 fclose(f);
 buf_dump_status(STATUS_ERR,
 "Cannot allocate " ULINTPF " bytes: %s",
@@ -345,7 +345,7 @@ buf_dump(
 bpage->id.page_no());
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 ut_a(j <= n_pages);
 n_pages = j;
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 2320026e5c8..1a10c60e8df 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -383,8 +383,9 @@ void buf_flush_insert_into_flush_list(buf_block_t* block, lsn_t lsn)
 if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
 ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
- /* The field in_LRU_list is protected by buf_pool->LRU_list_mutex,
- which we are not holding. However, while a block is in the flush
+ /* The field in_LRU_list is protected by buf_pool->mutex,
+ which we are not holding. However, while a block is in the
+ flush
 list, it is dirty and cannot be discarded, not from the
 page_hash or from the LRU list. At most, the uncompressed
 page frame of a compressed block may be discarded or created
 (copying the block->page to or from a buf_page_t that is
@@ -424,7 +425,7 @@
 ibool
 buf_flush_ready_for_replace(
 buf_page_t* bpage)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 ut_ad(bpage->in_LRU_list);
@@ -458,7 +459,7 @@ buf_flush_ready_for_flush(
 || (buf_page_get_state(bpage)
 == BUF_BLOCK_REMOVE_HASH
- && !mutex_own(&buf_pool->LRU_list_mutex)));
+ && !mutex_own(&buf_pool->mutex)));
 #else
@@ -514,7 +515,7 @@ void buf_flush_remove(buf_page_t* bpage)
 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 ut_ad(buf_page_get_state(bpage) != BUF_BLOCK_ZIP_DIRTY
- || mutex_own(&buf_pool->LRU_list_mutex));
+ || mutex_own(&buf_pool->mutex));
 #endif
 ut_ad(bpage->in_flush_list);
@@ -1062,7 +1063,7 @@ static void buf_flush_freed_page(buf_page_t *bpage, fil_space_t *space)
 ? &reinterpret_cast<buf_block_t*>(bpage)->mutex
 : &buf_pool->zip_mutex;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 mutex_enter(block_mutex);
 buf_page_set_io_fix(bpage, BUF_IO_NONE);
@@ -1074,7 +1075,7 @@ static void buf_flush_freed_page(buf_page_t *bpage, fil_space_t *space)
 BUF_IO_WRITE);
 buf_pool->stat.n_pages_written++;
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 const page_id_t page_id(bpage->id);
 const auto zip_size= bpage->zip_size();
 mutex_exit(block_mutex);
@@ -1131,8 +1132,8 @@ buf_flush_write_block_low(
 /* We are not holding block_mutex here.
 Nevertheless, it is safe to access bpage, because it is
 io_fixed and oldest_modification != 0.
- Thus, it cannot be relocated in the buffer pool or removed from
- flush_list or LRU_list. */
+ Thus, it cannot be relocated in the buf_pool or removed from
+ buf_pool->flush_list or buf_pool->LRU. */
 ut_ad(!mutex_own(&buf_pool->flush_list_mutex));
 ut_ad(!buf_page_get_mutex(bpage)->is_owned());
@@ -1273,7 +1274,7 @@ bool buf_flush_page(buf_page_t* bpage, buf_flush_t flush_type, bool sync)
 list page or to restart the LRU scan in
 buf_flush_single_page_from_LRU(). */
 ut_ad((flush_type == BUF_FLUSH_SINGLE_PAGE)
- == mutex_own(&buf_pool->LRU_list_mutex));
+ == mutex_own(&buf_pool->mutex));
 ut_ad(buf_page_in_file(bpage));
 ut_ad(!sync || flush_type == BUF_FLUSH_SINGLE_PAGE);
@@ -1327,7 +1328,7 @@ bool buf_flush_page(buf_page_t* bpage, buf_flush_t flush_type, bool sync)
 switch (flush_type) {
 case BUF_FLUSH_SINGLE_PAGE:
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 break;
 case BUF_FLUSH_LRU:
 case BUF_FLUSH_N_TYPES:
@@ -1353,7 +1354,7 @@ bool buf_flush_page(buf_page_t* bpage, buf_flush_t flush_type, bool sync)
 /* Even though bpage is not protected by any mutex at this
 point, it is safe to access bpage, because it is io_fixed and
 oldest_modification != 0. Thus, it cannot be relocated in the
- buffer pool or removed from flush_list or LRU_list. */
+ buf_pool or removed from buf_pool->flush_list or buf_pool->LRU. */
 buf_flush_write_block_low(bpage, flush_type, sync);
 return true;
@@ -1368,7 +1369,7 @@ buf_flush_batch()
 and buf_flush_page().
 @return whether the page was flushed and the mutex released */
 bool buf_flush_page_try(buf_block_t* block)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
 ut_ad(mutex_own(buf_page_get_mutex(&block->page)));
@@ -1376,7 +1377,7 @@ bool buf_flush_page_try(buf_block_t* block)
 return false;
 }
- /* The following call will release the LRU_list and block mutex. */
+ /* The following will have released the mutexes when returning true. */
 return buf_flush_page(&block->page, BUF_FLUSH_SINGLE_PAGE, true);
 }
 # endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
@@ -1450,7 +1451,7 @@ buf_flush_try_neighbors(
 ulint count = 0;
 ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
- ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(!mutex_own(&buf_pool->mutex));
 ut_ad(!mutex_own(&buf_pool->flush_list_mutex));
 fil_space_t* space = fil_space_acquire_for_io(page_id.space());
 if (!space) {
@@ -1623,7 +1624,7 @@ buf_flush_page_and_try_neighbors(
 ut_ad(flush_type != BUF_FLUSH_SINGLE_PAGE);
 ut_ad((flush_type == BUF_FLUSH_LRU
- && mutex_own(&buf_pool->LRU_list_mutex))
+ && mutex_own(&buf_pool->mutex))
 || (flush_type == BUF_FLUSH_LIST
 && mutex_own(&buf_pool->flush_list_mutex)));
@@ -1635,7 +1636,7 @@ buf_flush_page_and_try_neighbors(
 #ifdef UNIV_DEBUG
 if (!buf_page_in_file(bpage)) {
 ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH);
- ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(!mutex_own(&buf_pool->mutex));
 }
 #else
 ut_a(buf_page_in_file(bpage)
@@ -1644,7 +1645,7 @@ buf_flush_page_and_try_neighbors(
 if (buf_flush_ready_for_flush(bpage, flush_type)) {
 if (flush_type == BUF_FLUSH_LRU) {
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 }
 const page_id_t page_id = bpage->id;
@@ -1660,7 +1661,7 @@ buf_flush_page_and_try_neighbors(
 page_id, flush_type, *count, n_to_flush);
 if (flush_type == BUF_FLUSH_LRU) {
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 } else {
 mutex_enter(&buf_pool->flush_list_mutex);
 }
@@ -1675,7 +1676,7 @@ buf_flush_page_and_try_neighbors(
 }
 ut_ad((flush_type == BUF_FLUSH_LRU
- && mutex_own(&buf_pool->LRU_list_mutex))
+ && mutex_own(&buf_pool->mutex))
 || (flush_type == BUF_FLUSH_LIST
 && mutex_own(&buf_pool->flush_list_mutex)));
@@ -1698,7 +1699,7 @@ static ulint buf_free_from_unzip_LRU_list_batch(ulint max)
 ulint free_len = UT_LIST_GET_LEN(buf_pool->free);
 ulint lru_len = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 buf_block_t* block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
@@ -1716,7 +1717,7 @@ static ulint buf_free_from_unzip_LRU_list_batch(ulint max)
 if (buf_LRU_free_page(&block->page, false)) {
 /* Block was freed, all mutexes released */
 ++count;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
 } else {
@@ -1729,7 +1730,7 @@ static ulint buf_free_from_unzip_LRU_list_batch(ulint max)
 lru_len = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
 }
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 if (scanned) {
 MONITOR_INC_VALUE_CUMULATIVE(
@@ -1758,7 +1759,7 @@ buf_flush_LRU_list_batch(ulint max, flush_counters_t* n)
 ulint lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
 ulint withdraw_depth;
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 n->evicted = 0;
 withdraw_depth = buf_get_withdraw_depth(buf_pool);
@@ -1781,7 +1782,7 @@ buf_flush_LRU_list_batch(ulint max, flush_counters_t* n)
 clean and is not IO-fixed or buffer fixed. */
 if (buf_LRU_free_page(bpage, true)) {
 ++n->evicted;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 } else {
 mutex_exit(block_mutex);
 }
@@ -1803,7 +1804,7 @@ buf_flush_LRU_list_batch(ulint max, flush_counters_t* n)
 }
 ut_ad(!mutex_own(block_mutex));
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 free_len = UT_LIST_GET_LEN(buf_pool->free);
 lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
@@ -1816,7 +1817,7 @@ buf_flush_LRU_list_batch(ulint max, flush_counters_t* n)
 should be flushed, we factor in this value. */
 buf_lru_flush_page_count += n->flushed;
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 if (n->evicted) {
 MONITOR_INC_VALUE_CUMULATIVE(
@@ -1842,7 +1843,7 @@ Whether LRU or unzip_LRU is used depends on the state of the system.
 @param[out] n counts of flushed and evicted pages */
 static void buf_do_LRU_batch(ulint max, flush_counters_t* n)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 n->unzip_LRU_evicted = buf_LRU_evict_from_unzip_LRU()
 ? buf_free_from_unzip_LRU_list_batch(max) : 0;
@@ -1960,9 +1961,9 @@ buf_flush_batch(
 the flush functions. */
 switch (flush_type) {
 case BUF_FLUSH_LRU:
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 buf_do_LRU_batch(min_n, n);
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 break;
 case BUF_FLUSH_LIST:
 n->flushed = buf_do_flush_list_batch(min_n, lsn_limit);
@@ -2175,14 +2176,14 @@ bool buf_flush_single_page_from_LRU()
 buf_page_t* bpage;
 ibool freed;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 for (bpage = buf_pool->single_scan_itr.start(), scanned = 0,
 freed = false;
 bpage != NULL;
 ++scanned, bpage = buf_pool->single_scan_itr.get()) {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
 buf_pool->single_scan_itr.set(prev);
@@ -2230,7 +2231,7 @@ bool buf_flush_single_page_from_LRU()
 if (!freed) {
 /* Can't find a single flushable page. */
 ut_ad(!bpage);
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 }
 if (scanned) {
@@ -2241,7 +2242,7 @@ bool buf_flush_single_page_from_LRU()
 scanned);
 }
- ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(!mutex_own(&buf_pool->mutex));
 return(freed);
 }
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index e9eec1ad391..24982795a5d 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -63,7 +63,7 @@ static const ulint BUF_LRU_OLD_TOLERANCE = 20;
 /** When dropping the search hash index entries before deleting an ibd
 file, we build a local array of pages belonging to that tablespace
 in the buffer pool. Following is the size of that array.
-We also release buf_pool->LRU_list_mutex after scanning this many pages of the
+We also release buf_pool->mutex after scanning this many pages of the
 flush_list when dropping a table. This is to ensure that other threads
 are not blocked for extended period of time when using very large
 buffer pools. */
@@ -129,7 +129,7 @@ uint buf_LRU_old_threshold_ms;
 If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
 the object will be freed.
-The caller must hold buf_pool->LRU_list_mutex, the buf_page_get_mutex() mutex
+The caller must hold buf_pool->mutex, the buf_page_get_mutex() mutex
 and the appropriate hash_lock. This function will release the
 buf_page_get_mutex() and the hash_lock.
@@ -165,7 +165,7 @@ buf_LRU_block_free_hashed_page(
 @param[in] bpage control block */
 static inline void incr_LRU_size_in_bytes(const buf_page_t* bpage)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 buf_pool->stat.LRU_bytes += bpage->physical_size();
@@ -176,7 +176,7 @@ static inline void incr_LRU_size_in_bytes(const buf_page_t* bpage)
 instead of the general LRU list */
 bool buf_LRU_evict_from_unzip_LRU()
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 /* If the unzip_LRU list is empty, we can only use the LRU. */
 if (UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0) {
@@ -248,7 +248,7 @@ buf_LRU_drop_page_hash_for_tablespace(ulint id)
 ulint num_entries = 0;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 scan_again:
 for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->LRU);
@@ -307,15 +307,15 @@ next_page:
 /* Array full. We release the
 LRU list mutex to obey the latching order. */
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
 num_entries = 0;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
- /* Note that we released the buf_pool->LRU_list_mutex above
+ /* Note that we released the buf_pool->mutex above
 after reading the prev_bpage during processing of a
 page_hash_batch (i.e.: when the array was full).
 Because prev_bpage could belong to a compressed-only
@@ -337,7 +337,7 @@ next_page:
 }
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 /* Drop any remaining batch of search hashed pages. */
 buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
@@ -370,7 +370,7 @@ static void buf_flush_yield(buf_page_t* bpage)
 {
 BPageMutex* block_mutex = buf_page_get_mutex(bpage);
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(mutex_own(block_mutex));
 ut_ad(buf_page_in_file(bpage));
@@ -380,13 +380,13 @@ static void buf_flush_yield(buf_page_t* bpage)
 buf_page_set_sticky(bpage);
 /* Now it is safe to release the LRU list mutex. */
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 mutex_exit(block_mutex);
 /* Try and force a context switch. */
 os_thread_yield();
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 mutex_enter(block_mutex);
 /* "Unfix" the block now that we have both the
@@ -410,7 +410,7 @@ buf_flush_try_yield(
 restart the flush list scan */
 {
 /* Every BUF_LRU_DROP_SEARCH_SIZE iterations in the
- loop we release buf_pool->LRU_list_mutex to let other threads
+ loop we release buf_pool->mutex to let other threads
 do their job but only if the block is not IO fixed. This
 ensures that the block stays in its position in the
 flush_list. */
@@ -473,11 +473,11 @@ buf_flush_try_yield(
 @return true if page was removed. */
 static bool buf_flush_or_remove_page(buf_page_t *bpage, bool flush, bool *must_restart)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(mutex_own(&buf_pool->flush_list_mutex));
 /* It is safe to check bpage->space and bpage->io_fix while holding
- buf_pool->LRU_list_mutex only. */
+ buf_pool->mutex only. */
 if (buf_page_get_io_fix_unlocked(bpage) != BUF_IO_NONE) {
@@ -531,7 +531,7 @@ static bool buf_flush_or_remove_page(buf_page_t *bpage, bool flush, bool *must_r
 bpage, BUF_FLUSH_SINGLE_PAGE, false);
 if (processed) {
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 } else {
 mutex_exit(block_mutex);
 }
@@ -542,7 +542,7 @@ static bool buf_flush_or_remove_page(buf_page_t *bpage, bool flush, bool *must_r
 mutex_enter(&buf_pool->flush_list_mutex);
 ut_ad(!mutex_own(block_mutex));
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 return(processed);
 }
@@ -561,7 +561,7 @@ static bool buf_flush_or_remove_pages(ulint id, bool flush, ulint first)
 buf_page_t* bpage;
 ulint processed = 0;
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 mutex_enter(&buf_pool->flush_list_mutex);
 rescan:
 bool must_restart = false;
@@ -646,13 +646,13 @@ as they age and move towards the tail of the LRU list.
 @param[in] first first page to be flushed or evicted */
 static void buf_flush_dirty_pages(ulint id, bool flush, ulint first)
 {
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 while (!buf_flush_or_remove_pages(id, flush, first)) {
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 ut_d(buf_flush_validate());
 os_thread_sleep(2000);
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 }
 #ifdef UNIV_DEBUG
@@ -676,7 +676,7 @@ static void buf_flush_dirty_pages(ulint id, bool flush, ulint first)
 mutex_exit(&buf_pool->flush_list_mutex);
 }
 #endif
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 }
 /** Empty the flush list for all pages belonging to a tablespace.
@@ -704,7 +704,7 @@ void
 buf_LRU_insert_zip_clean(
 buf_page_t* bpage)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(mutex_own(&buf_pool->zip_mutex));
 ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
@@ -736,7 +736,7 @@ LRU list. The compressed page is preserved, and it need not be clean.
 @return true if freed */
 static bool buf_LRU_free_from_unzip_LRU_list(bool scan_all)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 if (!buf_LRU_evict_from_unzip_LRU()) {
 return(false);
@@ -786,7 +786,7 @@ static bool buf_LRU_free_from_unzip_LRU_list(bool scan_all)
 @return whether a page was freed */
 static bool buf_LRU_free_from_common_LRU_list(bool scan_all)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ulint scanned = 0;
 bool freed = false;
@@ -850,7 +850,7 @@ bool buf_LRU_scan_and_free_block(bool scan_all)
 bool freed = false;
 bool use_unzip_list = UT_LIST_GET_LEN(buf_pool->unzip_LRU) > 0;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 if (use_unzip_list) {
 freed = buf_LRU_free_from_unzip_LRU_list(scan_all);
@@ -861,7 +861,7 @@ bool buf_LRU_scan_and_free_block(bool scan_all)
 }
 if (!freed) {
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 }
 return(freed);
@@ -1021,7 +1021,7 @@ buf_block_t* buf_LRU_get_free_block()
 ulint n_iterations = 0;
 ulint flush_failures = 0;
- ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(!mutex_own(&buf_pool->mutex));
 MONITOR_INC(MONITOR_LRU_GET_FREE_SEARCH);
 loop:
@@ -1113,8 +1113,8 @@ not_found:
 TODO: A more elegant way would have been to return the freed
 up block to the caller here but the code that deals with
- removing the block from page_hash and LRU_list is fairly
- involved (particularly in case of compressed pages). We
+ removing the block from page_hash and LRU is fairly
+ involved (particularly in case of ROW_FORMAT=COMPRESSED pages). We
 can do that in a separate patch sometime in future. */
 if (!buf_flush_single_page_from_LRU()) {
@@ -1137,7 +1137,7 @@ static void buf_LRU_old_adjust_len()
 ulint new_len;
 ut_a(buf_pool->LRU_old);
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(buf_pool->LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
 ut_ad(buf_pool->LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
 compile_time_assert(BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN
@@ -1198,7 +1198,7 @@ static void buf_LRU_old_adjust_len()
 @param[in,out] buf_pool buffer pool instance */
 static void buf_LRU_old_init()
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);
 /* We first initialize all blocks in the LRU list as old and then use
@@ -1228,7 +1228,7 @@ static void buf_LRU_old_init()
 static void buf_unzip_LRU_remove_block_if_needed(buf_page_t* bpage)
 {
 ut_ad(buf_page_in_file(bpage));
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 if (buf_page_belongs_to_unzip_LRU(bpage)) {
 buf_block_t* block = reinterpret_cast<buf_block_t*>(bpage);
@@ -1253,7 +1253,7 @@ void buf_LRU_adjust_hp(const buf_page_t* bpage)
 @param[in] bpage control block */
 static inline void buf_LRU_remove_block(buf_page_t* bpage)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_a(buf_page_in_file(bpage));
@@ -1333,7 +1333,7 @@
 buf_unzip_LRU_add_block(
 buf_block_t* block,
 ibool old)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
 ut_ad(!block->in_unzip_LRU_list);
 ut_d(block->in_unzip_LRU_list = TRUE);
@@ -1359,7 +1359,7 @@
 buf_LRU_add_block_low(
 buf_page_t* bpage,
 ibool old)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_a(buf_page_in_file(bpage));
 ut_ad(!bpage->in_LRU_list);
@@ -1438,7 +1438,7 @@ void
 buf_LRU_make_block_young(
 buf_page_t* bpage)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 if (bpage->old) {
 buf_pool->stat.n_pages_made_young++;
@@ -1468,7 +1468,7 @@ buf_LRU_free_page(
 rw_lock_t* hash_lock = buf_page_hash_lock_get(bpage->id);
 BPageMutex* block_mutex = buf_page_get_mutex(bpage);
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(mutex_own(block_mutex));
 ut_ad(buf_page_in_file(bpage));
 ut_ad(bpage->in_LRU_list);
@@ -1539,7 +1539,7 @@ not_freed:
 if (!buf_LRU_block_remove_hashed(bpage, zip)) {
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 if (b != NULL) {
 buf_page_free_descriptor(b);
@@ -1665,7 +1665,7 @@ not_freed:
 mutex_exit(block_mutex);
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 /* Remove possible adaptive hash index on the page.
 The page was declared uninitialized by
@@ -1784,7 +1784,7 @@ buf_LRU_block_free_non_file_page(
 If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
 the object will be freed.
-The caller must hold buf_pool->LRU_list_mutex, the buf_page_get_mutex() mutex
+The caller must hold buf_pool->mutex, the buf_page_get_mutex() mutex
 and the appropriate hash_lock. This function will release the
 buf_page_get_mutex() and the hash_lock.
@@ -1810,7 +1810,7 @@ buf_LRU_block_remove_hashed(
 const buf_page_t* hashed_bpage;
 rw_lock_t* hash_lock;
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 hash_lock = buf_page_hash_lock_get(bpage->id);
@@ -1920,7 +1920,7 @@ buf_LRU_block_remove_hashed(
 ut_d(mutex_exit(buf_page_get_mutex(bpage)));
 ut_d(rw_lock_x_unlock(hash_lock));
- ut_d(mutex_exit(&buf_pool->LRU_list_mutex));
+ ut_d(mutex_exit(&buf_pool->mutex));
 ut_d(buf_print());
 ut_d(buf_LRU_print());
 ut_d(buf_validate());
@@ -1986,7 +1986,7 @@ buf_LRU_block_remove_hashed(
 and by the time we'll release it in the caller
 we'd have inserted the compressed only descriptor
 in the page_hash. */
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 rw_lock_x_unlock(hash_lock);
 mutex_exit(&((buf_block_t*) bpage)->mutex);
@@ -2050,7 +2050,7 @@ void buf_LRU_free_one_page(buf_page_t* bpage, page_id_t old_page_id)
 BPageMutex* block_mutex = buf_page_get_mutex(bpage);
 rw_lock_t* hash_lock = buf_page_hash_lock_get(old_page_id);
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(mutex_own(block_mutex));
 ut_ad(rw_lock_own(hash_lock, RW_LOCK_X));
 #endif /* UNIV_DEBUG */
@@ -2089,7 +2089,7 @@ uint buf_LRU_old_ratio_update(uint old_pct, bool adjust)
 }
 if (adjust) {
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 if (ratio != buf_pool->LRU_old_ratio) {
 buf_pool->LRU_old_ratio = ratio;
@@ -2100,7 +2100,7 @@ uint buf_LRU_old_ratio_update(uint old_pct, bool adjust)
 }
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 } else {
 buf_pool->LRU_old_ratio = ratio;
 }
@@ -2153,7 +2153,7 @@ void buf_LRU_validate()
 ulint old_len;
 ulint new_len;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
@@ -2213,7 +2213,7 @@ void buf_LRU_validate()
 ut_a(buf_pool->LRU_old_len == old_len);
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 mutex_enter(&buf_pool->free_list_mutex);
@@ -2228,7 +2228,7 @@ void buf_LRU_validate()
 mutex_exit(&buf_pool->free_list_mutex);
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 CheckUnzipLRUAndLRUList::validate();
@@ -2241,7 +2241,7 @@ void buf_LRU_validate()
 ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 }
 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -2249,7 +2249,7 @@ void buf_LRU_validate()
 /** Dump the LRU list to stderr. */
 void buf_LRU_print()
 {
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 for (const buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
 bpage != NULL;
@@ -2304,6 +2304,6 @@ void buf_LRU_print()
 mutex_exit(buf_page_get_mutex(bpage));
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 }
 #endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc
index a61d1e14b7d..7eba783b86d 100644
--- a/storage/innobase/buf/buf0rea.cc
+++ b/storage/innobase/buf/buf0rea.cc
@@ -61,7 +61,7 @@ buf_read_page_handle_error(
 rw_lock_t* hash_lock = buf_page_hash_lock_get(bpage->id);
 /* First unfix and release lock on the bpage */
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 rw_lock_x_lock(hash_lock);
 mutex_enter(buf_page_get_mutex(bpage));
 ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_READ);
@@ -84,7 +84,7 @@ buf_read_page_handle_error(
 ut_ad(buf_pool->n_pend_reads > 0);
 buf_pool->n_pend_reads--;
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 }
@@ -156,7 +156,7 @@ buf_read_page_low(
 << " unzip=" << unzip << ',' << (sync ? "sync" : "async"));
 ut_ad(buf_page_in_file(bpage));
- ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(!mutex_own(&buf_pool->mutex));
 if (sync) {
 thd_wait_begin(NULL, THD_WAIT_DISKIO);
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 9b159cbc50d..d0b0b0fa06d 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -507,7 +507,7 @@ static PSI_mutex_info all_innodb_mutexes[] = {
 PSI_KEY(buffer_block_mutex),
 # endif /* !PFS_SKIP_BUFFER_MUTEX_RWLOCK */
 PSI_KEY(buf_pool_flush_state_mutex),
- PSI_KEY(buf_pool_LRU_list_mutex),
+ PSI_KEY(buf_pool_mutex),
 PSI_KEY(buf_pool_free_list_mutex),
 PSI_KEY(buf_pool_zip_free_mutex),
 PSI_KEY(buf_pool_zip_hash_mutex),
@@ -18246,7 +18246,7 @@ innodb_buffer_pool_evict_uncompressed()
 {
 bool all_evicted = true;
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
 block != NULL; ) {
@@ -18279,7 +18279,7 @@ innodb_buffer_pool_evict_uncompressed()
 block = prev_block;
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 return(all_evicted);
 }
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index ace9138f6d6..3ac82a2ebfc 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -4621,7 +4621,7 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *)
 /* Aquire the mutex before allocating info_buffer, since
 UT_LIST_GET_LEN(buf_pool->LRU) could change */
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
@@ -4655,7 +4655,7 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *)
 ut_ad(lru_pos == UT_LIST_GET_LEN(buf_pool->LRU));
 exit:
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 if (info_buffer) {
 status = i_s_innodb_buf_page_lru_fill(
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index e4dee09ab49..3fa619cac9d 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -849,7 +849,7 @@ buf_block_set_io_fix(
 buf_block_t* block, /*!< in/out: control block */
 enum buf_io_fix io_fix);/*!< in: io_fix state */
 /** Makes a block sticky. A sticky block implies that even after we release
-the buf_pool->LRU_list_mutex and the block->mutex:
+the buf_pool->mutex and the block->mutex:
 * it cannot be removed from the flush_list
 * the block descriptor cannot be relocated
 * it cannot be removed from the LRU list
@@ -1329,7 +1329,7 @@ public:
 any one of the two mutexes */
 /* @} */
 /** @name LRU replacement algorithm fields
- These fields are protected by both buf_pool->LRU_list_mutex and the
+ These fields are protected by both buf_pool->mutex and the
 block mutex. */
 /* @{ */
@@ -1428,7 +1428,7 @@ struct buf_block_t{
 a block is in the unzip_LRU list
 if page.state == BUF_BLOCK_FILE_PAGE
 and page.zip.data != NULL. Protected by
- both LRU_list_mutex and the block
+ both mutex and the block
 mutex. */
 #ifdef UNIV_DEBUG
 ibool in_unzip_LRU_list;/*!< TRUE if the page is in the
@@ -1754,16 +1754,16 @@ struct buf_pool_stat_t{
 as part of read ahead. Not protected. */
 ulint n_ra_pages_evicted;/*!< number of read ahead
 pages that are evicted without
- being accessed. Protected by LRU_list_mutex. */
+ being accessed. Protected by mutex. */
 ulint n_pages_made_young; /*!< number of pages made young, in
 calls to buf_LRU_make_block_young(). Protected
- by LRU_list_mutex. */
+ by mutex. */
 ulint n_pages_not_made_young; /*!< number of pages not made
 young because the first access
 was not long enough ago, in
 buf_page_peek_if_too_old(). Not protected. */
 ulint LRU_bytes; /*!< LRU size in bytes. Protected by
- LRU_list_mutex. */
+ mutex. */
 ulint flush_list_bytes;/*!< flush_list size in bytes.
 Protected by flush_list_mutex */
 };
@@ -1785,7 +1785,7 @@ struct buf_pool_t {
 /** @name General fields */
 /* @{ */
- BufListMutex LRU_list_mutex; /*!< LRU list mutex */
+ BufListMutex mutex; /*!< LRU list mutex */
 BufListMutex free_list_mutex;/*!< free and withdraw list mutex */
 BufListMutex zip_free_mutex; /*!< buddy allocator mutex */
 BufListMutex zip_hash_mutex; /*!< zip_hash mutex */
@@ -1899,7 +1899,7 @@ struct buf_pool_t
 to read this for heuristic
 purposes without holding any
 mutex or latch. For non-heuristic
- purposes protected by LRU_list_mutex */
+ purposes protected by mutex */
 ibool try_LRU_scan; /*!< Set to FALSE when an LRU
 scan for free block fails. This
 flag is used to avoid repeated
@@ -1930,15 +1930,15 @@ struct buf_pool_t
 block list, when withdrawing */
 /** "hazard pointer" used during scan of LRU while doing
- LRU list batch. Protected by buf_pool::LRU_list_mutex */
+ LRU list batch. Protected by buf_pool::mutex */
 LRUHp lru_hp;
 /** Iterator used to scan the LRU list when searching for
- replacable victim. Protected by buf_pool::LRU_list_mutex. */
+ replacable victim. Protected by buf_pool::mutex. */
 LRUItr lru_scan_itr;
 /** Iterator used to scan the LRU list when searching for
- single page flushing victim. Protected by buf_pool::LRU_list_mutex. */
+ single page flushing victim. Protected by buf_pool::mutex. */
 LRUItr single_scan_itr;
 UT_LIST_BASE_NODE_T(buf_page_t) LRU;
@@ -1962,7 +1962,7 @@ struct buf_pool_t
 UT_LIST_BASE_NODE_T(buf_block_t) unzip_LRU;
 /*!< base node of the
 unzip_LRU list. The list is protected
- by LRU_list_mutex. */
+ by mutex. */
 /* @} */
 /** @name Buddy allocator fields */
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index c89467aaa25..c8aa57f073e 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -498,7 +498,7 @@ buf_block_set_io_fix(
 }
 /** Makes a block sticky. A sticky block implies that even after we release
-the buf_pool->LRU_list_mutex and the block->mutex:
+the buf_pool->mutex and the block->mutex:
 * it cannot be removed from the flush_list
 * the block descriptor cannot be relocated
 * it cannot be removed from the LRU list
@@ -512,7 +512,7 @@ buf_page_set_sticky(
 buf_page_t* bpage)
 {
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
@@ -565,7 +565,7 @@ buf_page_is_old(
 purposes even if LRU mutex is not being held. Keep the assertion
 for now since all the callers hold it. */
 ut_ad(mutex_own(buf_page_get_mutex(bpage))
- || mutex_own(&buf_pool->LRU_list_mutex));
+ || mutex_own(&buf_pool->mutex));
 ut_ad(buf_page_in_file(bpage));
 return(bpage->old);
@@ -581,7 +581,7 @@ buf_page_set_old(
 bool old)
 {
 ut_a(buf_page_in_file(bpage));
- ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(mutex_own(&buf_pool->mutex));
 ut_ad(bpage->in_LRU_list);
 #ifdef UNIV_LRU_DEBUG
@@ -626,7 +626,7 @@ buf_page_set_accessed(
 /*==================*/
 buf_page_t* bpage) /*!< in/out: control block */
 {
- ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
+ ut_ad(!mutex_own(&buf_pool->mutex));
 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 ut_a(buf_page_in_file(bpage));
@@ -650,9 +650,9 @@ buf_page_get_block(
 {
 if (bpage != NULL) {
 ut_ad(buf_page_hash_lock_held_s_or_x(bpage)
- || mutex_own(&buf_pool->LRU_list_mutex));
+ || mutex_own(&buf_pool->mutex));
 ut_ad(buf_page_hash_lock_held_s_or_x(bpage)
- || mutex_own(&buf_pool->LRU_list_mutex));
+ || mutex_own(&buf_pool->mutex));
 ut_ad(buf_page_in_file(bpage));
 if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
@@ -790,7 +790,7 @@ buf_block_modify_clock_inc(
 /* No latch is acquired for the shared temporary tablespace. */
 ut_ad(fsp_is_system_temporary(block->page.id.space())
 || (block->page.buf_fix_count == 0
- && mutex_own(&buf_pool->LRU_list_mutex))
+ && mutex_own(&buf_pool->mutex))
 || rw_lock_own_flagged(&block->lock,
 RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));
 assert_block_ahi_valid(block);
diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h
index ed692528e48..6b77bd13bd7 100644
--- a/storage/innobase/include/sync0sync.h
+++ b/storage/innobase/include/sync0sync.h
@@ -52,7 +52,7 @@ instrumentation due to their large number of instances. */
 extern mysql_pfs_key_t autoinc_mutex_key;
 extern mysql_pfs_key_t buffer_block_mutex_key;
 extern mysql_pfs_key_t buf_pool_flush_state_mutex_key;
-extern mysql_pfs_key_t buf_pool_LRU_list_mutex_key;
+extern mysql_pfs_key_t buf_pool_mutex_key;
 extern mysql_pfs_key_t buf_pool_free_list_mutex_key;
 extern mysql_pfs_key_t buf_pool_zip_free_mutex_key;
 extern mysql_pfs_key_t buf_pool_zip_hash_mutex_key;
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 663e21058d6..17c9a89cd0b 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -516,7 +516,7 @@ void lock_sys_t::resize(ulint n_cells)
 hash_table_free(old_hash);
 /* need to update block->lock_hash_val */
- mutex_enter(&buf_pool->LRU_list_mutex);
+ mutex_enter(&buf_pool->mutex);
 for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
 bpage; bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
 if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
@@ -527,7 +527,7 @@ void lock_sys_t::resize(ulint n_cells)
 bpage->id.space(), bpage->id.page_no());
 }
 }
- mutex_exit(&buf_pool->LRU_list_mutex);
+ mutex_exit(&buf_pool->mutex);
 mutex_exit(&mutex);
 }
diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc
index 02c39937786..41481ff6793 100644
--- a/storage/innobase/sync/sync0debug.cc
+++ b/storage/innobase/sync/sync0debug.cc
@@ -1285,7 +1285,7 @@ sync_latch_meta_init()
 LATCH_ADD_MUTEX(BUF_POOL_LRU_LIST,
 SYNC_BUF_LRU_LIST,
- buf_pool_LRU_list_mutex_key);
+ buf_pool_mutex_key);
 LATCH_ADD_MUTEX(BUF_POOL_FREE_LIST,
 SYNC_BUF_FREE_LIST,
diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc
index ff411fc3c70..9580011b7a2 100644
--- a/storage/innobase/sync/sync0sync.cc
+++ b/storage/innobase/sync/sync0sync.cc
@@ -39,7 +39,7 @@ Created 9/5/1995 Heikki Tuuri
 mysql_pfs_key_t autoinc_mutex_key;
 mysql_pfs_key_t buffer_block_mutex_key;
 mysql_pfs_key_t buf_pool_flush_state_mutex_key;
-mysql_pfs_key_t buf_pool_LRU_list_mutex_key;
+mysql_pfs_key_t buf_pool_mutex_key;
 mysql_pfs_key_t buf_pool_free_list_mutex_key;
 mysql_pfs_key_t buf_pool_zip_free_mutex_key;
 mysql_pfs_key_t buf_pool_zip_hash_mutex_key;