Diffstat (limited to 'storage/xtradb/include/buf0lru.h')
-rw-r--r--   storage/xtradb/include/buf0lru.h   134
1 files changed, 68 insertions, 66 deletions
diff --git a/storage/xtradb/include/buf0lru.h b/storage/xtradb/include/buf0lru.h
index 4b415214fa5..6415540178c 100644
--- a/storage/xtradb/include/buf0lru.h
+++ b/storage/xtradb/include/buf0lru.h
@@ -1,6 +1,6 @@
 /*****************************************************************************
 
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -11,8 +11,8 @@ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
 
 *****************************************************************************/
 
@@ -27,22 +27,13 @@ Created 11/5/1995 Heikki Tuuri
 #define buf0lru_h
 
 #include "univ.i"
+#ifndef UNIV_HOTBACKUP
 #include "ut0byte.h"
 #include "buf0types.h"
 
-/******************************************************************//**
-Tries to remove LRU flushed blocks from the end of the LRU list and put them
-to the free list. This is beneficial for the efficiency of the insert buffer
-operation, as flushed pages from non-unique non-clustered indexes are here
-taken out of the buffer pool, and their inserts redirected to the insert
-buffer. Otherwise, the flushed blocks could get modified again before read
-operations need new buffer blocks, and the i/o work done in flushing would be
-wasted. */
-UNIV_INTERN
-void
-buf_LRU_try_free_flushed_blocks(
-/*============================*/
-	buf_pool_t*	buf_pool);	/*!< in: buffer pool instance */
+// Forward declaration
+struct trx_t;
+
 /******************************************************************//**
 Returns TRUE if less than 25 % of the buffer pool is available. This can be
 used in heuristics to prevent huge transactions eating up the whole buffer
@@ -60,18 +51,19 @@ These are low-level functions
 /** Minimum LRU list length for which the LRU_old pointer is defined */
 #define BUF_LRU_OLD_MIN_LEN	512	/* 8 megabytes of 16k pages */
 
-/** Maximum LRU list search length in buf_flush_LRU_recommendation() */
-#define BUF_LRU_FREE_SEARCH_LEN(b)	(5 + 2 * BUF_READ_AHEAD_AREA(b))
-
 /******************************************************************//**
-Removes all pages belonging to a given tablespace. */
+Flushes all dirty pages or removes all pages belonging
+to a given tablespace. A PROBLEM: if readahead is being started, what
+guarantees that it will not try to read in pages after this operation
+has completed? */
 UNIV_INTERN
 void
 buf_LRU_flush_or_remove_pages(
 /*==========================*/
-	ulint		id,		/*!< in: space id */
-	enum buf_remove_t buf_remove);/*!< in: remove or flush
-					strategy */
+	ulint		id,		/*!< in: space id */
+	buf_remove_t	buf_remove,	/*!< in: remove or flush strategy */
+	const trx_t*	trx);		/*!< to check if the operation must
+					be interrupted */
 
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 /********************************************************************//**
@@ -87,40 +79,35 @@ buf_LRU_insert_zip_clean(
 Try to free a block. If bpage is a descriptor of a compressed-only
 page, the descriptor object will be freed as well.
 
-NOTE: This will temporarily release buf_pool_mutex. Furthermore, the
-page frame will no longer be accessible via bpage.
+NOTE: If this function returns true, it will release the LRU list mutex,
+and temporarily release and relock the buf_page_get_mutex() mutex.
+Furthermore, the page frame will no longer be accessible via bpage. If this
+function returns false, the buf_page_get_mutex() might be temporarily released
+and relocked too.
+
+The caller must hold the LRU list and buf_page_get_mutex() mutexes.
 
-The caller must hold buf_page_get_mutex(bpage) and release this mutex
-after the call. No other buf_page_get_mutex() may be held when
-calling this function.
-@return TRUE if freed, FALSE otherwise. */
+@return true if freed, false otherwise. */
 UNIV_INTERN
-ibool
-buf_LRU_free_block(
-/*===============*/
+bool
+buf_LRU_free_page(
+/*==============*/
 	buf_page_t*	bpage,	/*!< in: block to be freed */
-	ibool		zip,	/*!< in: TRUE if should remove also the
+	bool		zip)	/*!< in: true if should remove also the
				compressed page of an uncompressed page */
-	ibool*		have_LRU_mutex)
 	__attribute__((nonnull));
 /******************************************************************//**
 Try to free a replaceable block.
 @return TRUE if found and freed */
 UNIV_INTERN
 ibool
-buf_LRU_search_and_free_block(
-/*==========================*/
+buf_LRU_scan_and_free_block(
+/*========================*/
 	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
-	ulint		n_iterations);	/*!< in: how many times this has
-					been called repeatedly without
-					result: a high value means that
-					we should search farther; if
-					n_iterations < 10, then we search
-					n_iterations / 10 * buf_pool->curr_size
-					pages from the end of the LRU list; if
-					n_iterations < 5, then we will
-					also search n_iterations / 5
-					of the unzip_LRU list. */
+	ibool		scan_all)	/*!< in: scan whole LRU list
+					if TRUE, otherwise scan only
+					'old' blocks. */
+	__attribute__((nonnull,warn_unused_result));
 /******************************************************************//**
 Returns a free block from the buf_pool.
 The block is taken off the free list. If it is empty, returns NULL.
@@ -134,6 +121,27 @@ buf_LRU_get_free_only(
 Returns a free block from the buf_pool. The block is taken off the
 free list. If it is empty, blocks are moved from the end of the
 LRU list to the free list.
+This function is called from a user thread when it needs a clean
+block to read in a page. Note that we only ever get a block from
+the free list. Even when we flush a page or find a page in LRU scan
+we put it to free list to be used.
+* iteration 0:
+  * get a block from free list, success:done
+  * if there is an LRU flush batch in progress:
+    * wait for batch to end: retry free list
+  * if buf_pool->try_LRU_scan is set
+    * scan LRU up to srv_LRU_scan_depth to find a clean block
+    * the above will put the block on free list
+    * success:retry the free list
+  * flush one dirty page from tail of LRU to disk
+    * the above will put the block on free list
+    * success: retry the free list
+* iteration 1:
+  * same as iteration 0 except:
+    * scan whole LRU list
+    * scan LRU list even if buf_pool->try_LRU_scan is not set
+* iteration > 1:
+  * same as iteration 1 but sleep 100ms
 @return the free control block, in state BUF_BLOCK_READY_FOR_USE */
 UNIV_INTERN
 buf_block_t*
@@ -141,15 +149,22 @@ buf_LRU_get_free_block(
 /*===================*/
 	buf_pool_t*	buf_pool)	/*!< in/out: buffer pool instance */
 	__attribute__((nonnull,warn_unused_result));
-
+/******************************************************************//**
+Determines if the unzip_LRU list should be used for evicting a victim
+instead of the general LRU list.
+@return TRUE if should use unzip_LRU */
+UNIV_INTERN
+ibool
+buf_LRU_evict_from_unzip_LRU(
+/*=========================*/
+	buf_pool_t*	buf_pool);
 /******************************************************************//**
 Puts a block back to the free list. */
 UNIV_INTERN
 void
 buf_LRU_block_free_non_file_page(
 /*=============================*/
-	buf_block_t*	block,	/*!< in: block, must not contain a file page */
-	ibool		have_page_hash_mutex);
+	buf_block_t*	block);	/*!< in: block, must not contain a file page */
 /******************************************************************//**
 Adds a block to the LRU list. Please make sure that the zip_size is
 already set into the page zip when invoking the function, so that we
@@ -206,18 +221,6 @@ UNIV_INTERN
 void
 buf_LRU_stat_update(void);
 /*=====================*/
-/********************************************************************//**
-Dump the LRU page list to the specific file. */
-UNIV_INTERN
-ibool
-buf_LRU_file_dump(void);
-/*===================*/
-/********************************************************************//**
-Read the pages based on the specific file.*/
-UNIV_INTERN
-ibool
-buf_LRU_file_restore(void);
-/*======================*/
 
 /******************************************************************//**
 Remove one page from LRU list and put it to free list */
@@ -279,21 +282,18 @@ extern uint buf_LRU_old_threshold_ms;
 These statistics are not 'of' LRU but 'for' LRU. We keep count of I/O
 and page_zip_decompress() operations. Based on the statistics we decide
 if we want to evict from buf_pool->unzip_LRU or buf_pool->LRU. */
-struct buf_LRU_stat_struct
+struct buf_LRU_stat_t
 {
 	ulint	io;	/**< Counter of buffer pool I/O operations. */
 	ulint	unzip;	/**< Counter of page_zip_decompress operations. */
 };
 
-/** Statistics for selecting the LRU list for eviction. */
-typedef struct buf_LRU_stat_struct buf_LRU_stat_t;
-
 /** Current operation counters. Not protected by any mutex. Cleared
 by buf_LRU_stat_update(). */
 extern buf_LRU_stat_t	buf_LRU_stat_cur;
 
 /** Running sum of past values of buf_LRU_stat_cur.
-Updated by buf_LRU_stat_update(). Protected by buf_pool->mutex. */
+Updated by buf_LRU_stat_update(). */
 extern buf_LRU_stat_t	buf_LRU_stat_sum;
 
@@ -307,4 +307,6 @@ Increments the page_zip_decompress() counter in buf_LRU_stat_cur. */
 #include "buf0lru.ic"
 #endif
 
+#endif /* !UNIV_HOTBACKUP */
+
 #endif
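The comment added to buf_LRU_get_free_block() above describes an iterative retry strategy for obtaining a clean block. The following is a minimal, self-contained C sketch of that loop, not code from this patch: the types and helpers (pool_t, block_t, free_list_get, lru_scan_for_clean, flush_one_from_lru_tail, try_lru_scan_hint) are hypothetical stand-ins for the real buffer-pool calls, and the wait for an in-progress LRU flush batch is omitted for brevity.

/* Sketch only: hypothetical stand-ins for buffer-pool internals. */
#include <stdbool.h>
#include <stddef.h>
#include <unistd.h>

typedef struct pool_t  pool_t;   /* stands in for buf_pool_t */
typedef struct block_t block_t;  /* stands in for buf_block_t */

/* Hypothetical helpers, declarations only. */
block_t *free_list_get(pool_t *pool);                /* like buf_LRU_get_free_only() */
bool     lru_scan_for_clean(pool_t *pool, bool all); /* like buf_LRU_scan_and_free_block() */
bool     flush_one_from_lru_tail(pool_t *pool);      /* single-page flush from LRU tail */
bool     try_lru_scan_hint(pool_t *pool);            /* like buf_pool->try_LRU_scan */

block_t *
get_free_block_sketch(pool_t *pool)
{
	for (unsigned iteration = 0; ; iteration++) {

		block_t *block = free_list_get(pool);
		if (block != NULL) {
			return block;            /* success: done */
		}

		/* Iteration 0 scans only the 'old' tail and honours the
		try_LRU_scan hint; later iterations scan the whole list
		unconditionally. */
		bool scan_all = (iteration > 0);

		if (scan_all || try_lru_scan_hint(pool)) {
			if (lru_scan_for_clean(pool, scan_all)) {
				continue;        /* a block was freed: retry free list */
			}
		}

		/* Last resort: flush one dirty page from the LRU tail; its
		block eventually lands on the free list. */
		if (flush_one_from_lru_tail(pool)) {
			continue;                /* retry free list */
		}

		if (iteration > 1) {
			usleep(100000);          /* back off 100 ms before retrying */
		}
	}
}

In the real server the loop is also bounded by shutdown and interruption checks; the sketch simply loops until a block is obtained.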