author     Sergei Golubchik <sergii@pisem.net>  2011-07-18 23:04:24 +0200
committer  Sergei Golubchik <sergii@pisem.net>  2011-07-18 23:04:24 +0200
commit     4e46d8e5bff140f2549841167dc4b65a3c0a645d (patch)
tree       c6612dcc1d0fbd801c084e6c36307d9e5913a293 /storage/xtradb/include/buf0buf.h
parent     9a02c69f5c6766eaf552284a2a4dd0f1d26ecd2c (diff)
parent     d4d7a8fa62c406be73f6c0f6d75e795293db548b (diff)
download   mariadb-git-4e46d8e5bff140f2549841167dc4b65a3c0a645d.tar.gz
merge with xtradb-5.5.15
fix test cases
Diffstat (limited to 'storage/xtradb/include/buf0buf.h')
-rw-r--r--  storage/xtradb/include/buf0buf.h  538
1 files changed, 439 insertions, 99 deletions
diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h
index e06927f42f0..ef6a26f9459 100644
--- a/storage/xtradb/include/buf0buf.h
+++ b/storage/xtradb/include/buf0buf.h
@@ -33,21 +33,29 @@ Created 11/5/1995 Heikki Tuuri
#include "hash0hash.h"
#include "ut0byte.h"
#include "page0types.h"
-#include "ut0rbt.h"
#ifndef UNIV_HOTBACKUP
+#include "ut0rbt.h"
#include "os0proc.h"
-#include "srv0srv.h"
/** @name Modes for buf_page_get_gen */
/* @{ */
#define BUF_GET 10 /*!< get always */
#define BUF_GET_IF_IN_POOL 11 /*!< get if in pool */
+#define BUF_PEEK_IF_IN_POOL 12 /*!< get if in pool, do not make
+ the block young in the LRU list */
#define BUF_GET_NO_LATCH 14 /*!< get and bufferfix, but
set no latch; we have
separated this case, because
it is error-prone programming
not to set a latch, and it
should be used with care */
+#define BUF_GET_IF_IN_POOL_OR_WATCH 15
+					/*!< Get the page only if it's in the
+					buffer pool; if not, set a watch
+					on the page. */
+#define BUF_GET_POSSIBLY_FREED 16
+ /*!< Like BUF_GET, but do not mind
+ if the file page has been freed. */
/* @} */
/** @name Modes for buf_page_get_known_nowait */
/* @{ */
@@ -60,7 +68,14 @@ Created 11/5/1995 Heikki Tuuri
position of the block. */
/* @} */
-extern buf_pool_t* buf_pool; /*!< The buffer pool of the database */
+#define MAX_BUFFER_POOLS 64 /*!< The maximum number of buffer
+ pools that can be defined */
+
+#define BUF_POOL_WATCH_SIZE 1 /*!< Maximum number of concurrent
+ buffer pool watches */
+
+extern buf_pool_t* buf_pool_ptr; /*!< The buffer pools
+ of the database */
#ifdef UNIV_DEBUG
extern ibool buf_debug_prints;/*!< If this is set TRUE, the program
prints info whenever read or flush
@@ -68,6 +83,8 @@ extern ibool buf_debug_prints;/*!< If this is set TRUE, the program
#endif /* UNIV_DEBUG */
extern ulint srv_buf_pool_write_requests; /*!< variable to count write request
issued */
+extern ulint srv_buf_pool_instances;
+extern ulint srv_buf_pool_curr_size;
#else /* !UNIV_HOTBACKUP */
extern buf_block_t* back_block1; /*!< first block, for --apply-log */
extern buf_block_t* back_block2; /*!< second block, for page reorganize */
@@ -83,6 +100,8 @@ The enumeration values must be 0..7. */
enum buf_page_state {
BUF_BLOCK_ZIP_FREE = 0, /*!< contains a free
compressed page */
+ BUF_BLOCK_POOL_WATCH = 0, /*!< a sentinel for the buffer pool
+ watch, element of buf_pool->watch[] */
BUF_BLOCK_ZIP_PAGE, /*!< contains a clean
compressed page */
BUF_BLOCK_ZIP_DIRTY, /*!< contains a compressed
@@ -102,21 +121,123 @@ enum buf_page_state {
before putting to the free list */
};
+
+/** This structure defines information we will fetch from each buffer pool. It
+will be used to print table IO stats */
+struct buf_pool_info_struct{
+ /* General buffer pool info */
+ ulint pool_unique_id; /*!< Buffer Pool ID */
+ ulint pool_size; /*!< Buffer Pool size in pages */
+	ulint		pool_size_bytes; /*!< Buffer Pool size in bytes */
+ ulint lru_len; /*!< Length of buf_pool->LRU */
+ ulint old_lru_len; /*!< buf_pool->LRU_old_len */
+ ulint free_list_len; /*!< Length of buf_pool->free list */
+ ulint flush_list_len; /*!< Length of buf_pool->flush_list */
+ ulint n_pend_unzip; /*!< buf_pool->n_pend_unzip, pages
+ pending decompress */
+ ulint n_pend_reads; /*!< buf_pool->n_pend_reads, pages
+ pending read */
+ ulint n_pending_flush_lru; /*!< Pages pending flush in LRU */
+ ulint n_pending_flush_list; /*!< Pages pending flush in FLUSH
+ LIST */
+ ulint n_pending_flush_single_page;/*!< Pages pending flush in
+ BUF_FLUSH_SINGLE_PAGE list */
+ ulint n_pages_made_young; /*!< number of pages made young */
+ ulint n_pages_not_made_young; /*!< number of pages not made young */
+ ulint n_pages_read; /*!< buf_pool->n_pages_read */
+ ulint n_pages_created; /*!< buf_pool->n_pages_created */
+ ulint n_pages_written; /*!< buf_pool->n_pages_written */
+ ulint n_page_gets; /*!< buf_pool->n_page_gets */
+ ulint n_ra_pages_read; /*!< buf_pool->n_ra_pages_read, number
+ of pages readahead */
+ ulint n_ra_pages_evicted; /*!< buf_pool->n_ra_pages_evicted,
+ number of readahead pages evicted
+ without access */
+ ulint n_page_get_delta; /*!< num of buffer pool page gets since
+ last printout */
+
+ /* Buffer pool access stats */
+ double page_made_young_rate; /*!< page made young rate in pages
+ per second */
+ double page_not_made_young_rate;/*!< page not made young rate
+ in pages per second */
+ double pages_read_rate; /*!< num of pages read per second */
+	double	pages_created_rate;	/*!< num of pages created per second */
+ double pages_written_rate; /*!< num of pages written per second */
+ ulint page_read_delta; /*!< num of pages read since last
+ printout */
+ ulint young_making_delta; /*!< num of pages made young since
+ last printout */
+	ulint	not_young_making_delta;	/*!< num of pages not made young since
+ last printout */
+
+ /* Statistics about read ahead algorithm. */
+ double pages_readahead_rate; /*!< readahead rate in pages per
+ second */
+ double pages_evicted_rate; /*!< rate of readahead page evicted
+ without access, in pages per second */
+
+ /* Stats about LRU eviction */
+ ulint unzip_lru_len; /*!< length of buf_pool->unzip_LRU
+ list */
+ /* Counters for LRU policy */
+ ulint io_sum; /*!< buf_LRU_stat_sum.io */
+ ulint io_cur; /*!< buf_LRU_stat_cur.io, num of IO
+ for current interval */
+ ulint unzip_sum; /*!< buf_LRU_stat_sum.unzip */
+ ulint unzip_cur; /*!< buf_LRU_stat_cur.unzip, num
+ pages decompressed in current
+ interval */
+};
+
+typedef struct buf_pool_info_struct buf_pool_info_t;
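A minimal sketch of how these per-instance snapshots might be aggregated by a monitoring caller. The collector that fills the array is hypothetical; only the buf_pool_info_t fields are taken from this header.

/* Sketch only: info[] is assumed to hold one snapshot per buffer
pool instance, filled elsewhere by some collector. */
static ulint
total_lru_len(
	const buf_pool_info_t*	info,		/* in: per-instance snapshots */
	ulint			n_instances)	/* in: number of snapshots */
{
	ulint	i;
	ulint	sum = 0;

	for (i = 0; i < n_instances; i++) {
		sum += info[i].lru_len;	/* length of buf_pool->LRU */
	}

	return(sum);
}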
+
#ifndef UNIV_HOTBACKUP
/********************************************************************//**
+Acquire mutex on all buffer pool instances */
+UNIV_INLINE
+void
+buf_pool_mutex_enter_all(void);
+/*===========================*/
+
+/********************************************************************//**
+Release mutex on all buffer pool instances */
+UNIV_INLINE
+void
+buf_pool_mutex_exit_all(void);
+/*==========================*/
+
+/********************************************************************//**
+*/
+UNIV_INLINE
+void
+buf_pool_page_hash_x_lock_all(void);
+/*================================*/
+
+/********************************************************************//**
+*/
+UNIV_INLINE
+void
+buf_pool_page_hash_x_unlock_all(void);
+/*==================================*/
+
+/********************************************************************//**
Creates the buffer pool.
@return own: buf_pool object, NULL if not enough memory or error */
UNIV_INTERN
-buf_pool_t*
-buf_pool_init(void);
-/*===============*/
+ulint
+buf_pool_init(
+/*=========*/
+ ulint size, /*!< in: Size of the total pool in bytes */
+ ulint n_instances); /*!< in: Number of instances */
/********************************************************************//**
Frees the buffer pool at shutdown. This must not be invoked before
freeing all mutexes. */
UNIV_INTERN
void
-buf_pool_free(void);
-/*===============*/
+buf_pool_free(
+/*==========*/
+	ulint	n_instances);	/*!< in: number of instances to free */
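A hedged sketch of the startup/shutdown pairing implied by the new signatures; the 1 GB size, the instance count of 4 and the DB_SUCCESS return value are assumptions for illustration, not values taken from this patch.

static void
buf_pool_lifecycle_sketch(void)
{
	ulint	err;

	/* create four instances sharing a 1 GB pool */
	err = buf_pool_init(1024UL * 1024 * 1024, 4);

	if (err != DB_SUCCESS) {
		return;	/* not enough memory or error */
	}

	/* ... server runs ... */

	/* at shutdown, free the same number of instances (only after
	all mutexes have been freed, per the comment above) */
	buf_pool_free(4);
}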
/********************************************************************//**
Drops the adaptive hash index. To prevent a livelock, this function
@@ -153,23 +274,31 @@ UNIV_INLINE
ulint
buf_pool_get_curr_size(void);
/*========================*/
+/*********************************************************************//**
+Gets the current size of buffer buf_pool in frames.
+@return size in pages */
+UNIV_INLINE
+ulint
+buf_pool_get_n_pages(void);
+/*=======================*/
/********************************************************************//**
Gets the smallest oldest_modification lsn for any page in the pool. Returns
zero if all modified pages have been flushed to disk.
@return oldest modification in pool, zero if none */
-UNIV_INLINE
+UNIV_INTERN
ib_uint64_t
buf_pool_get_oldest_modification(void);
/*==================================*/
/********************************************************************//**
Allocates a buffer block.
@return own: the allocated block, in state BUF_BLOCK_MEMORY */
-UNIV_INLINE
+UNIV_INTERN
buf_block_t*
buf_block_alloc(
/*============*/
- ulint zip_size); /*!< in: compressed page size in bytes,
- or 0 if uncompressed tablespace */
+ buf_pool_t* buf_pool); /*!< in: buffer pool instance,
+ or NULL for round-robin selection
+ of the buffer pool */
/********************************************************************//**
Frees a buffer block which does not contain a file page. */
UNIV_INLINE
@@ -287,7 +416,8 @@ buf_page_get_gen(
ulint rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */
buf_block_t* guess, /*!< in: guessed block or NULL */
ulint mode, /*!< in: BUF_GET, BUF_GET_IF_IN_POOL,
- BUF_GET_NO_LATCH */
+ BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH or
+ BUF_GET_IF_IN_POOL_OR_WATCH */
const char* file, /*!< in: file name */
ulint line, /*!< in: line where called */
mtr_t* mtr); /*!< in: mini-transaction */
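A hedged sketch of passing the new BUF_PEEK_IF_IN_POOL mode to buf_page_get_gen(); the leading space/zip_size/offset parameters are assumed from the usual call pattern, since this hunk starts mid-declaration.

static ibool
page_is_resident_sketch(
	ulint	space,	/* in: space id */
	ulint	offset)	/* in: page number */
{
	buf_block_t*	block;
	mtr_t		mtr;

	mtr_start(&mtr);

	block = buf_page_get_gen(space, 0, offset, RW_S_LATCH,
				 NULL,			/* no guessed block */
				 BUF_PEEK_IF_IN_POOL,	/* keep LRU position */
				 __FILE__, __LINE__, &mtr);

	mtr_commit(&mtr);	/* releases the S-latch, if any */

	return(block != NULL);
}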
@@ -337,9 +467,8 @@ void
buf_page_release(
/*=============*/
buf_block_t* block, /*!< in: buffer block */
- ulint rw_latch, /*!< in: RW_S_LATCH, RW_X_LATCH,
+ ulint rw_latch); /*!< in: RW_S_LATCH, RW_X_LATCH,
RW_NO_LATCH */
- mtr_t* mtr); /*!< in: mtr */
/********************************************************************//**
Moves a page to the start of the buffer pool LRU list. This high-level
function can be used to prevent an important page from slipping out of
@@ -371,7 +500,7 @@ buf_reset_check_index_page_at_flush(
/*================================*/
ulint space, /*!< in: space id */
ulint offset);/*!< in: page number */
-#ifdef UNIV_DEBUG_FILE_ACCESSES
+#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
/********************************************************************//**
Sets file_page_was_freed TRUE if the page is found in the buffer pool.
This function should be called when we free a file page and want the
@@ -396,7 +525,7 @@ buf_page_reset_file_page_was_freed(
/*===============================*/
ulint space, /*!< in: space id */
ulint offset); /*!< in: page number */
-#endif /* UNIV_DEBUG_FILE_ACCESSES */
+#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */
/********************************************************************//**
Reads the freed_page_clock of a buffer block.
@return freed_page_clock */
@@ -449,7 +578,7 @@ buf_page_get_newest_modification(
page frame */
/********************************************************************//**
Increments the modify clock of a frame by 1. The caller must (1) own the
-buf_pool mutex and block bufferfix count has to be zero, (2) or own an x-lock
+buf_pool->mutex and block bufferfix count has to be zero, (2) or own an x-lock
on the block. */
UNIV_INLINE
void
@@ -536,7 +665,8 @@ UNIV_INTERN
buf_block_t*
buf_pool_contains_zip(
/*==================*/
- const void* data); /*!< in: pointer to compressed page */
+ buf_pool_t* buf_pool, /*!< in: buffer pool instance */
+ const void* data); /*!< in: pointer to compressed page */
#endif /* UNIV_DEBUG */
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/*********************************************************************//**
@@ -610,8 +740,15 @@ buf_get_modified_ratio_pct(void);
Refreshes the statistics used to print per-second averages. */
UNIV_INTERN
void
-buf_refresh_io_stats(void);
-/*======================*/
+buf_refresh_io_stats(
+/*=================*/
+ buf_pool_t* buf_pool); /*!< buffer pool instance */
+/**********************************************************************//**
+Refreshes the statistics used to print per-second averages. */
+UNIV_INTERN
+void
+buf_refresh_io_stats_all(void);
+/*=================*/
/*********************************************************************//**
Asserts that all file pages in the buffer are in a replaceable state.
@return TRUE */
@@ -836,7 +973,8 @@ buf_page_set_accessed(
__attribute__((nonnull));
/*********************************************************************//**
Gets the buf_block_t handle of a buffered file block if an uncompressed
-page frame exists, or NULL.
+page frame exists, or NULL. Note: even though bpage is not declared a
+const we don't update its value. It is safe to make this pure.
@return control block, or NULL */
UNIV_INLINE
buf_block_t*
@@ -988,8 +1126,7 @@ UNIV_INTERN
void
buf_page_io_complete(
/*=================*/
- buf_page_t* bpage, /*!< in: pointer to the block in question */
- trx_t* trx);
+ buf_page_t* bpage); /*!< in: pointer to the block in question */
/********************************************************************//**
Calculates a folded value of a file page address to use in the page hash
table.
@@ -1001,15 +1138,81 @@ buf_page_address_fold(
ulint space, /*!< in: space id */
ulint offset) /*!< in: offset of the page within space */
__attribute__((const));
+/********************************************************************//**
+Calculates the index of a buffer pool to the buf_pool[] array.
+@return the position of the buffer pool in buf_pool[] */
+UNIV_INLINE
+ulint
+buf_pool_index(
+/*===========*/
+ const buf_pool_t* buf_pool) /*!< in: buffer pool */
+ __attribute__((nonnull, const));
+/********************************************************************//**
+*/
+UNIV_INTERN
+buf_block_t*
+buf_page_from_array(
+/*================*/
+ buf_pool_t* buf_pool,
+ ulint n_block);
+/******************************************************************//**
+Returns the buffer pool instance given a page instance
+@return buf_pool */
+UNIV_INLINE
+buf_pool_t*
+buf_pool_from_bpage(
+/*================*/
+ const buf_page_t* bpage); /*!< in: buffer pool page */
+/******************************************************************//**
+Returns the buffer pool instance given a block instance
+@return buf_pool */
+UNIV_INLINE
+buf_pool_t*
+buf_pool_from_block(
+/*================*/
+ const buf_block_t* block); /*!< in: block */
+/******************************************************************//**
+Returns the buffer pool instance given space and offset of page
+@return buffer pool */
+UNIV_INLINE
+buf_pool_t*
+buf_pool_get(
+/*==========*/
+ ulint space, /*!< in: space id */
+ ulint offset);/*!< in: offset of the page within space */
+/******************************************************************//**
+Returns the buffer pool instance given its array index
+@return buffer pool */
+UNIV_INLINE
+buf_pool_t*
+buf_pool_from_array(
+/*================*/
+ ulint index); /*!< in: array index to get
+ buffer pool instance from */
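A small sketch of walking all instances with buf_pool_from_array(); srv_buf_pool_instances is the extern declared earlier in this diff, and the unlocked read of n_pend_reads is only illustrative.

static ulint
count_pending_reads_sketch(void)
{
	ulint	i;
	ulint	pend_reads = 0;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool = buf_pool_from_array(i);

		/* a careful caller would hold buf_pool->mutex here */
		pend_reads += buf_pool->n_pend_reads;
	}

	return(pend_reads);
}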
/******************************************************************//**
Returns the control block of a file page, NULL if not found.
@return block, NULL if not found */
UNIV_INLINE
buf_page_t*
+buf_page_hash_get_low(
+/*==================*/
+ buf_pool_t* buf_pool, /*!< buffer pool instance */
+ ulint space, /*!< in: space id */
+ ulint offset, /*!< in: offset of the page
+ within space */
+ ulint fold); /*!< in: buf_page_address_fold(
+ space, offset) */
+/******************************************************************//**
+Returns the control block of a file page, NULL if not found.
+@return block, NULL if not found or not a real control block */
+UNIV_INLINE
+buf_page_t*
buf_page_hash_get(
/*==============*/
- ulint space, /*!< in: space id */
- ulint offset);/*!< in: offset of the page within space */
+ buf_pool_t* buf_pool, /*!< in: buffer pool instance */
+ ulint space, /*!< in: space id */
+ ulint offset); /*!< in: offset of the page
+ within space */
/******************************************************************//**
Returns the control block of a file page, NULL if not found
or an uncompressed page frame does not exist.
@@ -1018,8 +1221,10 @@ UNIV_INLINE
buf_block_t*
buf_block_hash_get(
/*===============*/
- ulint space, /*!< in: space id */
- ulint offset);/*!< in: offset of the page within space */
+ buf_pool_t* buf_pool, /*!< in: buffer pool instance */
+ ulint space, /*!< in: space id */
+ ulint offset); /*!< in: offset of the page
+ within space */
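A hedged sketch of the two-step lookup these accessors imply: map (space, offset) to its instance with buf_pool_get(), then probe that instance's page hash. Taking page_hash_latch in shared mode is an assumption about the XtraDB latching rules, not something this header states.

static ibool
page_hash_contains_sketch(
	ulint	space,	/* in: space id */
	ulint	offset)	/* in: page number */
{
	buf_pool_t*	buf_pool = buf_pool_get(space, offset);
	buf_page_t*	bpage;

	rw_lock_s_lock(&buf_pool->page_hash_latch);	/* assumed latch rule */

	bpage = buf_page_hash_get(buf_pool, space, offset);

	rw_lock_s_unlock(&buf_pool->page_hash_latch);

	return(bpage != NULL);
}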
/*********************************************************************//**
Gets the current length of the free list of buffer blocks.
@return length of the free list */
@@ -1027,8 +1232,67 @@ UNIV_INTERN
ulint
buf_get_free_list_len(void);
/*=======================*/
-#endif /* !UNIV_HOTBACKUP */
+/********************************************************************
+Determine if a block is a sentinel for a buffer pool watch.
+@return TRUE if a sentinel for a buffer pool watch, FALSE if not */
+UNIV_INTERN
+ibool
+buf_pool_watch_is_sentinel(
+/*=======================*/
+ buf_pool_t* buf_pool, /*!< buffer pool instance */
+ const buf_page_t* bpage) /*!< in: block */
+ __attribute__((nonnull, warn_unused_result));
+/****************************************************************//**
+Add watch for the given page to be read in. Caller must have the buffer pool
+mutex reserved.
+@return NULL if watch set, block if the page is in the buffer pool */
+UNIV_INTERN
+buf_page_t*
+buf_pool_watch_set(
+/*===============*/
+ ulint space, /*!< in: space id */
+ ulint offset, /*!< in: page number */
+ ulint fold) /*!< in: buf_page_address_fold(space, offset) */
+ __attribute__((warn_unused_result));
+/****************************************************************//**
+Stop watching if the page has been read in.
+buf_pool_watch_set(space,offset) must have returned NULL before. */
+UNIV_INTERN
+void
+buf_pool_watch_unset(
+/*=================*/
+ ulint space, /*!< in: space id */
+ ulint offset);/*!< in: page number */
+/****************************************************************//**
+Check if the page has been read in.
+This may only be called after buf_pool_watch_set(space,offset)
+has returned NULL and before invoking buf_pool_watch_unset(space,offset).
+@return FALSE if the given page was not read in, TRUE if it was */
+UNIV_INTERN
+ibool
+buf_pool_watch_occurred(
+/*====================*/
+ ulint space, /*!< in: space id */
+ ulint offset) /*!< in: page number */
+ __attribute__((warn_unused_result));
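A hedged sketch of the watch lifecycle spelled out by the three declarations above: arm the watch, later check whether the page arrived, then always unset it. Whatever buffer pool latch buf_pool_watch_set() expects from its caller is omitted here.

static void
watch_lifecycle_sketch(
	ulint	space,	/* in: space id */
	ulint	offset)	/* in: page number */
{
	ulint		fold = buf_page_address_fold(space, offset);
	buf_page_t*	bpage = buf_pool_watch_set(space, offset, fold);

	if (bpage != NULL) {
		/* page was already in the buffer pool; no watch set */
		return;
	}

	/* ... other work while the watch is armed ... */

	if (buf_pool_watch_occurred(space, offset)) {
		/* the page has been read in meanwhile */
	}

	buf_pool_watch_unset(space, offset);
}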
+/********************************************************************//**
+Get total buffer pool statistics. */
+UNIV_INTERN
+void
+buf_get_total_list_len(
+/*===================*/
+ ulint* LRU_len, /*!< out: length of all LRU lists */
+ ulint* free_len, /*!< out: length of all free lists */
+ ulint* flush_list_len);/*!< out: length of all flush lists */
+/********************************************************************//**
+Get total buffer pool statistics. */
+UNIV_INTERN
+void
+buf_get_total_stat(
+/*===============*/
+ buf_pool_stat_t*tot_stat); /*!< out: buffer pool stats */
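The two aggregate helpers above can be exercised together; a minimal sketch (the n_pages_read member of buf_pool_stat_t is assumed from its use elsewhere in this file).

static void
print_totals_sketch(void)
{
	ulint		lru_len;
	ulint		free_len;
	ulint		flush_list_len;
	buf_pool_stat_t	tot_stat;

	buf_get_total_list_len(&lru_len, &free_len, &flush_list_len);
	buf_get_total_stat(&tot_stat);

	fprintf(stderr, "LRU %lu, free %lu, flush %lu, reads %lu\n",
		(ulong) lru_len, (ulong) free_len, (ulong) flush_list_len,
		(ulong) tot_stat.n_pages_read);
}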
+
+#endif /* !UNIV_HOTBACKUP */
/** The common buffer control block structure
for compressed and uncompressed frames */
@@ -1037,18 +1301,18 @@ struct buf_page_struct{
/** @name General fields
None of these bit-fields must be modified without holding
buf_page_get_mutex() [buf_block_struct::mutex or
- buf_pool_zip_mutex], since they can be stored in the same
+ buf_pool->zip_mutex], since they can be stored in the same
machine word. Some of these fields are additionally protected
- by buf_pool_mutex. */
+ by buf_pool->mutex. */
/* @{ */
unsigned space:32; /*!< tablespace id; also protected
- by buf_pool_mutex. */
+ by buf_pool->mutex. */
unsigned offset:32; /*!< page number; also protected
- by buf_pool_mutex. */
+ by buf_pool->mutex. */
unsigned state:3; /*!< state of the control block; also
- protected by buf_pool_mutex.
+ protected by buf_pool->mutex.
State transitions from
BUF_BLOCK_READY_FOR_USE to
BUF_BLOCK_MEMORY need not be
@@ -1060,15 +1324,23 @@ struct buf_page_struct{
flush_type.
@see enum buf_flush */
unsigned io_fix:2; /*!< type of pending I/O operation;
- also protected by buf_pool_mutex
+ also protected by buf_pool->mutex
@see enum buf_io_fix */
- unsigned buf_fix_count:25;/*!< count of how manyfold this block
+ unsigned buf_fix_count:19;/*!< count of how manyfold this block
is currently bufferfixed */
+ unsigned buf_pool_index:6;/*!< index number of the buffer pool
+ that this block belongs to */
+# if MAX_BUFFER_POOLS > 64
+# error "MAX_BUFFER_POOLS > 64; redefine buf_pool_index:6"
+# endif
/* @} */
#endif /* !UNIV_HOTBACKUP */
page_zip_des_t zip; /*!< compressed page; zip.data
(but not the data it points to) is
- also protected by buf_pool_mutex */
+ also protected by buf_pool->mutex;
+ state == BUF_BLOCK_ZIP_PAGE and
+ zip.data == NULL means an active
+ buf_pool->watch */
#ifndef UNIV_HOTBACKUP
buf_page_t* hash; /*!< node used in chaining to
buf_pool->page_hash or
@@ -1079,14 +1351,16 @@ struct buf_page_struct{
#endif /* UNIV_DEBUG */
/** @name Page flushing fields
- All these are protected by buf_pool_mutex. */
+ All these are protected by buf_pool->mutex. */
/* @{ */
/* UT_LIST_NODE_T(buf_page_t) list; */
/*!< based on state, this is a
- list node, protected only by
- buf_pool_mutex, in one of the
- following lists in buf_pool:
+ list node, protected either by
+ buf_pool->mutex or by
+ buf_pool->flush_list_mutex,
+ in one of the following lists in
+ buf_pool:
- BUF_BLOCK_NOT_USED: free
- BUF_BLOCK_FILE_PAGE: flush_list
@@ -1094,6 +1368,12 @@ struct buf_page_struct{
- BUF_BLOCK_ZIP_PAGE: zip_clean
- BUF_BLOCK_ZIP_FREE: zip_free[]
+ If bpage is part of flush_list
+ then the node pointers are
+ covered by buf_pool->flush_list_mutex.
+ Otherwise these pointers are
+ protected by buf_pool->mutex.
+
The contents of the list node
is undefined if !in_flush_list
&& state == BUF_BLOCK_FILE_PAGE,
@@ -1108,12 +1388,18 @@ struct buf_page_struct{
UT_LIST_NODE_T(buf_page_t) zip_list; /* zip_clean or zip_free[] */
#ifdef UNIV_DEBUG
ibool in_flush_list; /*!< TRUE if in buf_pool->flush_list;
- when buf_pool_mutex is free, the
- following should hold: in_flush_list
+ when buf_pool->flush_list_mutex is
+ free, the following should hold:
+ in_flush_list
== (state == BUF_BLOCK_FILE_PAGE
- || state == BUF_BLOCK_ZIP_DIRTY) */
+ || state == BUF_BLOCK_ZIP_DIRTY)
+ Writes to this field must be
+ covered by both block->mutex
+ and buf_pool->flush_list_mutex. Hence
+ reads can happen while holding
+ any one of the two mutexes */
ibool in_free_list; /*!< TRUE if in buf_pool->free; when
- buf_pool_mutex is free, the following
+ buf_pool->mutex is free, the following
should hold: in_free_list
== (state == BUF_BLOCK_NOT_USED) */
#endif /* UNIV_DEBUG */
@@ -1121,7 +1407,8 @@ struct buf_page_struct{
/*!< log sequence number of
the youngest modification to
this block, zero if not
- modified */
+ modified. Protected by block
+ mutex */
ib_uint64_t oldest_modification;
/*!< log sequence number of
the START of the log entry
@@ -1129,11 +1416,16 @@ struct buf_page_struct{
modification to this block
which has not yet been flushed
on disk; zero if all
- modifications are on disk */
+ modifications are on disk.
+ Writes to this field must be
+ covered by both block->mutex
+ and buf_pool->flush_list_mutex. Hence
+ reads can happen while holding
+ any one of the two mutexes */
/* @} */
/** @name LRU replacement algorithm fields
- These fields are protected by buf_pool_mutex only (not
- buf_pool_zip_mutex or buf_block_struct::mutex). */
+ These fields are protected by buf_pool->mutex only (not
+ buf_pool->zip_mutex or buf_block_struct::mutex). */
/* @{ */
UT_LIST_NODE_T(buf_page_t) LRU;
@@ -1157,12 +1449,13 @@ struct buf_page_struct{
0 if the block was never accessed
in the buffer pool */
/* @} */
+ ibool space_was_being_deleted;
ibool is_corrupt;
-# ifdef UNIV_DEBUG_FILE_ACCESSES
+# if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
ibool file_page_was_freed;
/*!< this is set to TRUE when fsp
frees a page in buffer pool */
-# endif /* UNIV_DEBUG_FILE_ACCESSES */
+# endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */
#endif /* !UNIV_HOTBACKUP */
};
@@ -1203,7 +1496,7 @@ struct buf_block_struct{
unsigned lock_hash_val:32;/*!< hashed value of the page address
in the record lock hash table;
protected by buf_block_t::lock
- (or buf_block_t::mutex, buf_pool_mutex
+ (or buf_block_t::mutex, buf_pool->mutex
in buf_page_get_gen(),
buf_page_init_for_read()
and buf_page_create()) */
@@ -1264,7 +1557,7 @@ struct buf_block_struct{
pointers in the adaptive hash index
pointing to this frame */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- unsigned is_hashed:1; /*!< TRUE if hash index has
+ volatile unsigned is_hashed:1; /*!< TRUE if hash index has
already been built on this
page; note that it does not
guarantee that the index is
@@ -1278,6 +1571,7 @@ struct buf_block_struct{
unsigned curr_left_side:1;/*!< TRUE or FALSE in hash indexing */
dict_index_t* index; /*!< Index for which the adaptive
hash index has been created. */
+ volatile rw_lock_t* btr_search_latch;
/* @} */
# ifdef UNIV_SYNC_DEBUG
/** @name Debug fields */
@@ -1302,10 +1596,7 @@ struct buf_block_struct{
/**********************************************************************//**
Compute the hash fold value for blocks in buf_pool->zip_hash. */
/* @{ */
-/* the fold should be relative when srv_buffer_pool_shm_key is enabled */
-#define BUF_POOL_ZIP_FOLD_PTR(ptr) (!srv_buffer_pool_shm_key\
- ?((ulint) (ptr) / UNIV_PAGE_SIZE)\
- :((ulint) ((byte*)ptr - (byte*)(buf_pool->chunks->blocks->frame)) / UNIV_PAGE_SIZE))
+#define BUF_POOL_ZIP_FOLD_PTR(ptr) ((ulint) (ptr) / UNIV_PAGE_SIZE)
#define BUF_POOL_ZIP_FOLD(b) BUF_POOL_ZIP_FOLD_PTR((b)->frame)
#define BUF_POOL_ZIP_FOLD_BPAGE(b) BUF_POOL_ZIP_FOLD((buf_block_t*) (b))
/* @} */
@@ -1344,6 +1635,16 @@ struct buf_pool_stat_struct{
buf_page_peek_if_too_old() */
};
+/** Statistics of buddy blocks of a given size. */
+struct buf_buddy_stat_struct {
+ /** Number of blocks allocated from the buddy system. */
+ ulint used;
+ /** Number of blocks relocated by the buddy system. */
+ ib_uint64_t relocated;
+ /** Total duration of block relocations, in microseconds. */
+ ib_uint64_t relocated_usec;
+};
+
/** @brief The buffer pool structure.
NOTE! The definition appears here only for other modules of this
@@ -1353,7 +1654,30 @@ struct buf_pool_struct{
/** @name General fields */
/* @{ */
-
+ mutex_t mutex; /*!< Buffer pool mutex of this
+ instance */
+ mutex_t zip_mutex; /*!< Zip mutex of this buffer
+ pool instance, protects compressed
+ only pages (of type buf_page_t, not
+					buf_block_t) */
+ mutex_t LRU_list_mutex;
+ rw_lock_t page_hash_latch;
+ mutex_t free_list_mutex;
+ mutex_t zip_free_mutex;
+ mutex_t zip_hash_mutex;
+ ulint instance_no; /*!< Array index of this buffer
+ pool instance */
+ ulint old_pool_size; /*!< Old pool size in bytes */
+ ulint curr_pool_size; /*!< Current pool size in bytes */
+ ulint LRU_old_ratio; /*!< Reserve this much of the buffer
+ pool for "old" blocks */
+#ifdef UNIV_DEBUG
+ ulint buddy_n_frames; /*!< Number of frames allocated from
+ the buffer pool to the buddy system */
+#endif
+#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
+ ulint mutex_exit_forbidden; /*!< Forbid release mutex */
+#endif
ulint n_chunks; /*!< number of buffer pool chunks */
buf_chunk_t* chunks; /*!< buffer pool chunks */
ulint curr_size; /*!< current pool size in pages */
@@ -1365,12 +1689,16 @@ struct buf_pool_struct{
whose frames are allocated to the
zip buddy system,
indexed by block->frame */
- ulint n_pend_reads; /*!< number of pending read operations */
+ ulint n_pend_reads; /*!< number of pending read
+ operations */
ulint n_pend_unzip; /*!< number of pending decompressions */
time_t last_printout_time;
/*!< when buf_print_io was last time
called */
+ buf_buddy_stat_t buddy_stat[BUF_BUDDY_SIZES_MAX + 1];
+ /*!< Statistics of buddy system,
+ indexed by block size */
buf_pool_stat_t stat; /*!< current statistics */
buf_pool_stat_t old_stat; /*!< old statistics */
@@ -1380,6 +1708,13 @@ struct buf_pool_struct{
/* @{ */
+ mutex_t flush_list_mutex;/*!< mutex protecting the
+ flush list access. This mutex
+ protects flush_list, flush_rbt
+ and bpage::list pointers when
+ the bpage is on flush_list. It
+ also protects writes to
+ bpage::oldest_modification */
UT_LIST_BASE_NODE_T(buf_page_t) flush_list;
/*!< base node of the modified block
list */
@@ -1393,7 +1728,7 @@ struct buf_pool_struct{
/*!< this is in the set state
when there is no flush batch
of the given type running */
- ib_rbt_t* flush_rbt; /* !< a red-black tree is used
+ ib_rbt_t* flush_rbt; /*!< a red-black tree is used
exclusively during recovery to
speed up insertions in the
flush_list. This tree contains
@@ -1405,7 +1740,8 @@ struct buf_pool_struct{
also be on the flush_list.
This tree is relevant only in
recovery and is set to NULL
- once the recovery is over. */
+ once the recovery is over.
+ Protected by flush_list_mutex */
ulint freed_page_clock;/*!< a sequence number used
to count the number of buffer
blocks removed from the end of
@@ -1419,8 +1755,8 @@ struct buf_pool_struct{
this is incremented by one; this is
set to zero when a buffer block is
allocated */
-
/* @} */
+
/** @name LRU replacement algorithm fields */
/* @{ */
@@ -1458,6 +1794,12 @@ struct buf_pool_struct{
/*!< unmodified compressed pages */
UT_LIST_BASE_NODE_T(buf_page_t) zip_free[BUF_BUDDY_SIZES_MAX];
/*!< buddy free lists */
+
+ buf_page_t watch[BUF_POOL_WATCH_SIZE];
+ /*!< Sentinel records for buffer
+ pool watches. Protected by
+ buf_pool->mutex. */
+
//#if BUF_BUDDY_HIGH != UNIV_PAGE_SIZE
//# error "BUF_BUDDY_HIGH != UNIV_PAGE_SIZE"
//#endif
@@ -1467,58 +1809,56 @@ struct buf_pool_struct{
/* @} */
};
-/** mutex protecting the buffer pool struct and control blocks, except the
-read-write lock in them */
-extern mutex_t buf_pool_mutex;
-extern mutex_t LRU_list_mutex;
-extern mutex_t flush_list_mutex;
-extern rw_lock_t page_hash_latch;
-extern mutex_t free_list_mutex;
-extern mutex_t zip_free_mutex;
-extern mutex_t zip_hash_mutex;
-/** mutex protecting the control blocks of compressed-only pages
-(of type buf_page_t, not buf_block_t) */
-extern mutex_t buf_pool_zip_mutex;
-
-/** @name Accessors for buf_pool_mutex.
-Use these instead of accessing buf_pool_mutex directly. */
+/** @name Accessors for buf_pool->mutex.
+Use these instead of accessing buf_pool->mutex directly. */
/* @{ */
-/** Test if buf_pool_mutex is owned. */
-#define buf_pool_mutex_own() mutex_own(&buf_pool_mutex)
-/** Acquire the buffer pool mutex. */
-#define buf_pool_mutex_enter() do { \
- ut_ad(!mutex_own(&buf_pool_zip_mutex)); \
- mutex_enter(&buf_pool_mutex); \
+/** Test if a buffer pool mutex is owned. */
+#define buf_pool_mutex_own(b) mutex_own(&b->mutex)
+/** Acquire a buffer pool mutex. */
+/* note: in XtraDB the latch order of the buf_pool mutex has been changed */
+#define buf_pool_mutex_enter(b) do { \
+ mutex_enter(&b->mutex); \
+} while (0)
+
+/** Test if flush list mutex is owned. */
+#define buf_flush_list_mutex_own(b) mutex_own(&b->flush_list_mutex)
+
+/** Acquire the flush list mutex. */
+#define buf_flush_list_mutex_enter(b) do { \
+ mutex_enter(&b->flush_list_mutex); \
+} while (0)
+/** Release the flush list mutex. */
+# define buf_flush_list_mutex_exit(b) do { \
+ mutex_exit(&b->flush_list_mutex); \
} while (0)
+
+
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
-/** Flag to forbid the release of the buffer pool mutex.
-Protected by buf_pool_mutex. */
-extern ulint buf_pool_mutex_exit_forbidden;
/** Forbid the release of the buffer pool mutex. */
-# define buf_pool_mutex_exit_forbid() do { \
- ut_ad(buf_pool_mutex_own()); \
- buf_pool_mutex_exit_forbidden++; \
+# define buf_pool_mutex_exit_forbid(b) do { \
+ ut_ad(buf_pool_mutex_own(b)); \
+ b->mutex_exit_forbidden++; \
} while (0)
/** Allow the release of the buffer pool mutex. */
-# define buf_pool_mutex_exit_allow() do { \
- ut_ad(buf_pool_mutex_own()); \
- ut_a(buf_pool_mutex_exit_forbidden); \
- buf_pool_mutex_exit_forbidden--; \
+# define buf_pool_mutex_exit_allow(b) do { \
+ ut_ad(buf_pool_mutex_own(b)); \
+ ut_a(b->mutex_exit_forbidden); \
+ b->mutex_exit_forbidden--; \
} while (0)
/** Release the buffer pool mutex. */
-# define buf_pool_mutex_exit() do { \
- ut_a(!buf_pool_mutex_exit_forbidden); \
- mutex_exit(&buf_pool_mutex); \
+# define buf_pool_mutex_exit(b) do { \
+ ut_a(!b->mutex_exit_forbidden); \
+ mutex_exit(&b->mutex); \
} while (0)
#else
/** Forbid the release of the buffer pool mutex. */
-# define buf_pool_mutex_exit_forbid() ((void) 0)
+# define buf_pool_mutex_exit_forbid(b) ((void) 0)
/** Allow the release of the buffer pool mutex. */
-# define buf_pool_mutex_exit_allow() ((void) 0)
+# define buf_pool_mutex_exit_allow(b) ((void) 0)
/** Release the buffer pool mutex. */
-# define buf_pool_mutex_exit() mutex_exit(&buf_pool_mutex)
+# define buf_pool_mutex_exit(b) mutex_exit(&b->mutex)
#endif
#endif /* !UNIV_HOTBACKUP */
/* @} */
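A short sketch of the per-instance accessors defined above; note that they now take the buffer pool instance, where the old global buf_pool_mutex macros took no argument.

static void
mutex_accessor_sketch(
	buf_pool_t*	buf_pool)	/* in: buffer pool instance */
{
	buf_pool_mutex_enter(buf_pool);

	ut_ad(buf_pool_mutex_own(buf_pool));

	/* guard a region that must not drop the mutex (debug builds) */
	buf_pool_mutex_exit_forbid(buf_pool);
	/* ... work that must keep the mutex ... */
	buf_pool_mutex_exit_allow(buf_pool);

	buf_pool_mutex_exit(buf_pool);
}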