Diffstat (limited to 'storage/xtradb/include/buf0lru.h')
-rw-r--r--	storage/xtradb/include/buf0lru.h	301
1 file changed, 301 insertions, 0 deletions
diff --git a/storage/xtradb/include/buf0lru.h b/storage/xtradb/include/buf0lru.h
new file mode 100644
index 00000000000..8abebfb675c
--- /dev/null
+++ b/storage/xtradb/include/buf0lru.h
@@ -0,0 +1,301 @@
+/*****************************************************************************
+
+Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+Place, Suite 330, Boston, MA 02111-1307 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/buf0lru.h
+The database buffer pool LRU replacement algorithm
+
+Created 11/5/1995 Heikki Tuuri
+*******************************************************/
+
+#ifndef buf0lru_h
+#define buf0lru_h
+
+#include "univ.i"
+#include "ut0byte.h"
+#include "buf0types.h"
+
+/******************************************************************//**
+Tries to remove LRU flushed blocks from the end of the LRU list and put them
+to the free list. This is beneficial for the efficiency of the insert buffer
+operation, as flushed pages from non-unique non-clustered indexes are here
+taken out of the buffer pool, and their inserts redirected to the insert
+buffer. Otherwise, the flushed blocks could get modified again before read
+operations need new buffer blocks, and the i/o work done in flushing would be
+wasted. */
+UNIV_INTERN
+void
+buf_LRU_try_free_flushed_blocks(void);
+/*==================================*/
+/******************************************************************//**
+Returns TRUE if less than 25 % of the buffer pool is available. This can be
+used in heuristics to prevent huge transactions eating up the whole buffer
+pool for their locks.
+@return TRUE if less than 25 % of buffer pool left */
+UNIV_INTERN
+ibool
+buf_LRU_buf_pool_running_out(void);
+/*==============================*/
+
+/*#######################################################################
+These are low-level functions
+#########################################################################*/
+
+/** Minimum LRU list length for which the LRU_old pointer is defined */
+#define BUF_LRU_OLD_MIN_LEN	512	/* 8 megabytes of 16k pages */
+
+/** Maximum LRU list search length in buf_flush_LRU_recommendation() */
+#define BUF_LRU_FREE_SEARCH_LEN	(5 + 2 * BUF_READ_AHEAD_AREA)
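
(An aside on the arithmetic in the BUF_LRU_OLD_MIN_LEN comment: with InnoDB's
default 16 KiB page size, 512 pages are exactly 8 MiB. The standalone check
below is illustrative only and not part of the header; PAGE_SIZE_BYTES is a
local name for the default page size, not an InnoDB symbol.)

/* Standalone sanity check of the "8 megabytes of 16k pages" figure. */
#include <assert.h>
#include <stdio.h>

#define BUF_LRU_OLD_MIN_LEN 512			/* as defined above */
#define PAGE_SIZE_BYTES     (16 * 1024)		/* default InnoDB page size */

int main(void)
{
	unsigned long bytes = (unsigned long) BUF_LRU_OLD_MIN_LEN * PAGE_SIZE_BYTES;
	assert(bytes == 8UL * 1024 * 1024);	/* 512 * 16 KiB = 8 MiB */
	printf("LRU_old is maintained once the list holds %lu bytes of pages\n",
	       bytes);
	return 0;
}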
+
+/******************************************************************//**
+Invalidates all pages belonging to a given tablespace when we are deleting
+the data file(s) of that tablespace. A PROBLEM: if readahead is being started,
+what guarantees that it will not try to read in pages after this operation has
+completed? */
+UNIV_INTERN
+void
+buf_LRU_invalidate_tablespace(
+/*==========================*/
+	ulint	id);	/*!< in: space id */
+/******************************************************************//**
+Marks that the given tablespace was deleted, so that its pages in the
+buffer pool can be discarded without being flushed. */
+UNIV_INTERN
+void
+buf_LRU_mark_space_was_deleted(
+/*===========================*/
+	ulint	id);	/*!< in: space id */
+#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
+/********************************************************************//**
+Insert a compressed block into buf_pool->zip_clean in the LRU order. */
+UNIV_INTERN
+void
+buf_LRU_insert_zip_clean(
+/*=====================*/
+	buf_page_t*	bpage);	/*!< in: pointer to the block in question */
+#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
+
+/******************************************************************//**
+Try to free a block. If bpage is a descriptor of a compressed-only
+page, the descriptor object will be freed as well.
+
+NOTE: If this function returns TRUE, it will temporarily
+release buf_pool_mutex. Furthermore, the page frame will no longer be
+accessible via bpage.
+
+The caller must hold buf_pool_mutex and buf_page_get_mutex(bpage) and
+release these two mutexes after the call. No other
+buf_page_get_mutex() may be held when calling this function.
+@return TRUE if freed, FALSE otherwise. */
+UNIV_INTERN
+ibool
+buf_LRU_free_block(
+/*===============*/
+	buf_page_t*	bpage,	/*!< in: block to be freed */
+	ibool		zip,	/*!< in: TRUE if should remove also the
+				compressed page of an uncompressed page */
+	ibool		have_LRU_mutex)
+	__attribute__((nonnull));
+/******************************************************************//**
+Try to free a replaceable block.
+@return TRUE if found and freed */
+UNIV_INTERN
+ibool
+buf_LRU_search_and_free_block(
+/*==========================*/
+	ulint	n_iterations);	/*!< in: how many times this has been called
+				repeatedly without result: a high value means
+				that we should search farther; if
+				n_iterations < 10, then we search
+				n_iterations / 10 * buf_pool->curr_size
+				pages from the end of the LRU list; if
+				n_iterations < 5, then we will also search
+				n_iterations / 5 of the unzip_LRU list. */
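
(The n_iterations contract above can be made concrete with a small model.
This is a toy, not the buf0lru.c implementation: the header only pins down
the n_iterations < 10 and < 5 cases, so the fallback to the full list length
for larger values is an assumption made here for illustration.)

#include <stdio.h>

/* Toy model of the documented search depths; all lengths are in pages. */
static unsigned long
lru_search_depth(unsigned long n_iterations, unsigned long curr_size)
{
	/* "we search n_iterations / 10 * buf_pool->curr_size pages
	from the end of the LRU list" */
	return n_iterations < 10 ? n_iterations * curr_size / 10 : curr_size;
}

static unsigned long
unzip_lru_search_depth(unsigned long n_iterations, unsigned long unzip_len)
{
	/* "we will also search n_iterations / 5 of the unzip_LRU list" */
	return n_iterations < 5 ? n_iterations * unzip_len / 5 : unzip_len;
}

int main(void)
{
	/* with a 65536-page pool, the third retry scans ~19660 LRU pages */
	printf("LRU: %lu pages\n", lru_search_depth(3, 65536));
	printf("unzip_LRU: %lu pages\n", unzip_lru_search_depth(3, 8192));
	return 0;
}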
+/******************************************************************//**
+Returns a free block from the buf_pool. The block is taken off the
+free list. If it is empty, returns NULL.
+@return a free control block, or NULL if the buf_block->free list is empty */
+UNIV_INTERN
+buf_block_t*
+buf_LRU_get_free_only(void);
+/*=======================*/
+/******************************************************************//**
+Returns a free block from the buf_pool. The block is taken off the
+free list. If it is empty, blocks are moved from the end of the
+LRU list to the free list.
+@return the free control block, in state BUF_BLOCK_READY_FOR_USE */
+UNIV_INTERN
+buf_block_t*
+buf_LRU_get_free_block(void)
+/*========================*/
+	__attribute__((warn_unused_result));
+
+/******************************************************************//**
+Puts a block back to the free list. */
+UNIV_INTERN
+void
+buf_LRU_block_free_non_file_page(
+/*=============================*/
+	buf_block_t*	block,	/*!< in: block, must not contain a file page */
+	ibool		have_page_hash_mutex);
+/******************************************************************//**
+Adds a block to the LRU list. */
+UNIV_INTERN
+void
+buf_LRU_add_block(
+/*==============*/
+	buf_page_t*	bpage,	/*!< in: control block */
+	ibool		old);	/*!< in: TRUE if should be put to the old
+				blocks in the LRU list, else put to the
+				start; if the LRU list is very short, added to
+				the start regardless of this parameter */
+/******************************************************************//**
+Adds a block to the LRU list of decompressed zip pages. */
+UNIV_INTERN
+void
+buf_unzip_LRU_add_block(
+/*====================*/
+	buf_block_t*	block,	/*!< in: control block */
+	ibool		old);	/*!< in: TRUE if should be put to the end
+				of the list, else put to the start */
+/******************************************************************//**
+Moves a block to the start of the LRU list. */
+UNIV_INTERN
+void
+buf_LRU_make_block_young(
+/*=====================*/
+	buf_page_t*	bpage);	/*!< in: control block */
+/******************************************************************//**
+Moves a block to the end of the LRU list. */
+UNIV_INTERN
+void
+buf_LRU_make_block_old(
+/*===================*/
+	buf_page_t*	bpage);	/*!< in: control block */
+/**********************************************************************//**
+Updates buf_LRU_old_ratio.
+@return updated old_pct */
+UNIV_INTERN
+uint
+buf_LRU_old_ratio_update(
+/*=====================*/
+	uint	old_pct,/*!< in: Reserve this percentage of
+			the buffer pool for "old" blocks. */
+	ibool	adjust);/*!< in: TRUE=adjust the LRU list;
+			FALSE=just assign buf_LRU_old_ratio
+			during the initialization of InnoDB */
+/********************************************************************//**
+Update the historical stats that we are collecting for LRU eviction
+policy at the end of each interval. */
+UNIV_INTERN
+void
+buf_LRU_stat_update(void);
+/*=====================*/
+/********************************************************************//**
+Dump the LRU page list to a dump file. */
+UNIV_INTERN
+ibool
+buf_LRU_file_dump(void);
+/*===================*/
+/********************************************************************//**
+Read pages back into the buffer pool based on the dump file. */
+UNIV_INTERN
+ibool
+buf_LRU_file_restore(void);
+/*======================*/
+
+#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
+/**********************************************************************//**
+Validates the LRU list.
+@return TRUE */
+UNIV_INTERN
+ibool
+buf_LRU_validate(void);
+/*==================*/
+#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
+#if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
+/**********************************************************************//**
+Prints the LRU list. */
+UNIV_INTERN
+void
+buf_LRU_print(void);
+/*===============*/
+#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
+
+/** @name Heuristics for detecting index scan @{ */
+/** Reserve this much/BUF_LRU_OLD_RATIO_DIV of the buffer pool for
+"old" blocks. Protected by buf_pool_mutex. */
+extern uint	buf_LRU_old_ratio;
+/** The denominator of buf_LRU_old_ratio. */
+#define BUF_LRU_OLD_RATIO_DIV	1024
+/** Maximum value of buf_LRU_old_ratio.
+@see buf_LRU_old_adjust_len
+@see buf_LRU_old_ratio_update */
+#define BUF_LRU_OLD_RATIO_MAX	BUF_LRU_OLD_RATIO_DIV
+/** Minimum value of buf_LRU_old_ratio.
+@see buf_LRU_old_adjust_len
+@see buf_LRU_old_ratio_update
+The minimum must exceed
+(BUF_LRU_OLD_TOLERANCE + 5) * BUF_LRU_OLD_RATIO_DIV / BUF_LRU_OLD_MIN_LEN. */
+#define BUF_LRU_OLD_RATIO_MIN	51
+
+#if BUF_LRU_OLD_RATIO_MIN >= BUF_LRU_OLD_RATIO_MAX
+# error "BUF_LRU_OLD_RATIO_MIN >= BUF_LRU_OLD_RATIO_MAX"
+#endif
+#if BUF_LRU_OLD_RATIO_MAX > BUF_LRU_OLD_RATIO_DIV
+# error "BUF_LRU_OLD_RATIO_MAX > BUF_LRU_OLD_RATIO_DIV"
+#endif
+
+/** Move blocks to "new" LRU list only if the first access was at
+least this many milliseconds ago. Not protected by any mutex or latch. */
+extern uint	buf_LRU_old_threshold_ms;
+/* @} */
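
(On the constants above: buf_LRU_old_ratio_update() takes a percentage, the
innodb_old_blocks_pct setting whose server default is 37, and stores it in
units of BUF_LRU_OLD_RATIO_DIV. BUF_LRU_OLD_TOLERANCE is defined in
buf0lru.c; if its value is 20, the bound in the BUF_LRU_OLD_RATIO_MIN comment
works out to (20 + 5) * 1024 / 512 = 50, making 51 the smallest value that
exceeds it. The sketch below assumes a simple linear conversion clamped to
the documented range; it is not a copy of the buf0lru.c code.)

#include <stdio.h>

#define BUF_LRU_OLD_RATIO_DIV	1024
#define BUF_LRU_OLD_RATIO_MAX	BUF_LRU_OLD_RATIO_DIV
#define BUF_LRU_OLD_RATIO_MIN	51

/* Assumed percentage -> ratio mapping, clamped to [MIN, MAX]. */
static unsigned
old_pct_to_ratio(unsigned old_pct)
{
	unsigned ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100;
	if (ratio < BUF_LRU_OLD_RATIO_MIN) ratio = BUF_LRU_OLD_RATIO_MIN;
	if (ratio > BUF_LRU_OLD_RATIO_MAX) ratio = BUF_LRU_OLD_RATIO_MAX;
	return ratio;
}

int main(void)
{
	/* the default 37% lands near 3/8 of the LRU list: 378/1024 */
	printf("37%% -> %u/%d\n", old_pct_to_ratio(37), BUF_LRU_OLD_RATIO_DIV);
	return 0;
}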
+
+/** @brief Statistics for selecting the LRU list for eviction.
+
+These statistics are not 'of' LRU but 'for' LRU. We keep count of I/O
+and page_zip_decompress() operations. Based on the statistics we decide
+if we want to evict from buf_pool->unzip_LRU or buf_pool->LRU. */
+struct buf_LRU_stat_struct
+{
+	ulint	io;	/**< Counter of buffer pool I/O operations. */
+	ulint	unzip;	/**< Counter of page_zip_decompress operations. */
+};
+
+/** Statistics for selecting the LRU list for eviction. */
+typedef struct buf_LRU_stat_struct buf_LRU_stat_t;
+
+/** Current operation counters. Not protected by any mutex.
+Cleared by buf_LRU_stat_update(). */
+extern buf_LRU_stat_t	buf_LRU_stat_cur;
+
+/** Running sum of past values of buf_LRU_stat_cur.
+Updated by buf_LRU_stat_update(). Protected by buf_pool_mutex. */
+extern buf_LRU_stat_t	buf_LRU_stat_sum;
+
+/********************************************************************//**
+Increments the I/O counter in buf_LRU_stat_cur. */
+#define buf_LRU_stat_inc_io()	buf_LRU_stat_cur.io++
+/********************************************************************//**
+Increments the page_zip_decompress() counter in buf_LRU_stat_cur. */
+#define buf_LRU_stat_inc_unzip()	buf_LRU_stat_cur.unzip++
+
+#ifndef UNIV_NONINL
+#include "buf0lru.ic"
+#endif
+
+#endif
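
(The io/unzip counter pair drives the choice between the two eviction lists.
The self-contained toy below only illustrates the idea stated in the struct
comment: under an I/O-bound load it is cheaper to evict just the uncompressed
copy of a page from unzip_LRU, keeping the compressed copy cached, while under
a decompression-bound load whole pages are evicted from the LRU. The decision
rule and its factor of 4 are arbitrary illustrations, not InnoDB's actual
formula or tuning.)

#include <stdio.h>

typedef struct {
	unsigned long io;	/* buffer pool I/O operations */
	unsigned long unzip;	/* page decompressions */
} lru_stat_t;			/* toy stand-in for buf_LRU_stat_t */

/* Evict only uncompressed copies when decompressions are rare vs. I/O. */
static int
evict_from_unzip_lru(const lru_stat_t* s)
{
	return s->unzip * 4 < s->io;	/* factor 4 is illustrative only */
}

int main(void)
{
	lru_stat_t io_bound  = { 1000, 10 };	/* disk is the bottleneck */
	lru_stat_t cpu_bound = { 100, 900 };	/* decompression dominates */

	printf("io-bound:  evict from %s\n",
	       evict_from_unzip_lru(&io_bound) ? "unzip_LRU" : "LRU");
	printf("cpu-bound: evict from %s\n",
	       evict_from_unzip_lru(&cpu_bound) ? "unzip_LRU" : "LRU");
	return 0;
}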