Diffstat (limited to 'storage/xtradb/buf/buf0lru.c')
-rw-r--r--	storage/xtradb/buf/buf0lru.c | 531
1 file changed, 389 insertions(+), 142 deletions(-)
diff --git a/storage/xtradb/buf/buf0lru.c b/storage/xtradb/buf/buf0lru.c index 2270ea5dce2..58e2c23275b 100644 --- a/storage/xtradb/buf/buf0lru.c +++ b/storage/xtradb/buf/buf0lru.c @@ -49,18 +49,22 @@ Created 11/5/1995 Heikki Tuuri #include "log0recv.h" #include "srv0srv.h" -/** The number of blocks from the LRU_old pointer onward, including the block -pointed to, must be 3/8 of the whole LRU list length, except that the -tolerance defined below is allowed. Note that the tolerance must be small -enough such that for even the BUF_LRU_OLD_MIN_LEN long LRU list, the -LRU_old pointer is not allowed to point to either end of the LRU list. */ +/** The number of blocks from the LRU_old pointer onward, including +the block pointed to, must be buf_LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV +of the whole LRU list length, except that the tolerance defined below +is allowed. Note that the tolerance must be small enough such that for +even the BUF_LRU_OLD_MIN_LEN long LRU list, the LRU_old pointer is not +allowed to point to either end of the LRU list. */ #define BUF_LRU_OLD_TOLERANCE 20 -/** The whole LRU list length is divided by this number to determine an -initial segment in buf_LRU_get_recent_limit */ - -#define BUF_LRU_INITIAL_RATIO 8 +/** The minimum amount of non-old blocks when the LRU_old list exists +(that is, when there are more than BUF_LRU_OLD_MIN_LEN blocks). +@see buf_LRU_old_adjust_len */ +#define BUF_LRU_NON_OLD_MIN_LEN 5 +#if BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN +# error "BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN" +#endif /** When dropping the search hash index entries before deleting an ibd file, we build a local array of pages belonging to that tablespace @@ -107,6 +111,15 @@ UNIV_INTERN buf_LRU_stat_t buf_LRU_stat_sum; /* @} */ +/** @name Heuristics for detecting index scan @{ */ +/** Reserve this much/BUF_LRU_OLD_RATIO_DIV of the buffer pool for +"old" blocks. Protected by buf_pool_mutex. */ +UNIV_INTERN uint buf_LRU_old_ratio; +/** Move blocks to "new" LRU list only if the first access was at +least this many milliseconds ago. Not protected by any mutex or latch. */ +UNIV_INTERN uint buf_LRU_old_threshold_ms; +/* @} */ + /******************************************************************//** Takes a block out of the LRU list and page hash table. If the block is compressed-only (BUF_BLOCK_ZIP_PAGE), @@ -255,9 +268,12 @@ scan_again: mutex_t* block_mutex = buf_page_get_mutex_enter(bpage); buf_page_t* prev_bpage; - ut_a(block_mutex); prev_bpage = UT_LIST_GET_PREV(LRU, bpage); + if (!block_mutex) { + goto next_page; + } + ut_a(buf_page_in_file(bpage)); if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE @@ -360,9 +376,13 @@ scan_again: ut_a(buf_page_in_file(bpage)); - ut_a(block_mutex); prev_bpage = UT_LIST_GET_PREV(LRU, bpage); + if (!block_mutex) { + bpage = prev_bpage; + continue; + } + if (buf_page_get_space(bpage) == id) { if (bpage->buf_fix_count > 0 || buf_page_get_io_fix(bpage) != BUF_IO_NONE) { @@ -450,45 +470,6 @@ next_page: } } -/******************************************************************//** -Gets the minimum LRU_position field for the blocks in an initial segment -(determined by BUF_LRU_INITIAL_RATIO) of the LRU list. The limit is not -guaranteed to be precise, because the ulint_clock may wrap around. 
-@return the limit; zero if could not determine it */ -UNIV_INTERN -ulint -buf_LRU_get_recent_limit(void) -/*==========================*/ -{ - const buf_page_t* bpage; - ulint len; - ulint limit; - - //buf_pool_mutex_enter(); - mutex_enter(&LRU_list_mutex); - - len = UT_LIST_GET_LEN(buf_pool->LRU); - - if (len < BUF_LRU_OLD_MIN_LEN) { - /* The LRU list is too short to do read-ahead */ - - //buf_pool_mutex_exit(); - mutex_exit(&LRU_list_mutex); - - return(0); - } - - bpage = UT_LIST_GET_FIRST(buf_pool->LRU); - - limit = buf_page_get_LRU_position(bpage); - len /= BUF_LRU_INITIAL_RATIO; - - //buf_pool_mutex_exit(); - mutex_exit(&LRU_list_mutex); - - return(limit > len ? (limit - len) : 0); -} - /********************************************************************//** Insert a compressed block into buf_pool->zip_clean in the LRU order. */ UNIV_INTERN @@ -631,10 +612,13 @@ restart: bpage = UT_LIST_GET_PREV(LRU, bpage), distance--) { enum buf_lru_free_block_status freed; + unsigned accessed; mutex_t* block_mutex = buf_page_get_mutex_enter(bpage); - ut_a(block_mutex); + if (!block_mutex) { + goto restart; + } if (!bpage->in_LRU_list || !buf_page_in_file(bpage)) { @@ -645,11 +629,18 @@ restart: ut_ad(buf_page_in_file(bpage)); ut_ad(bpage->in_LRU_list); + accessed = buf_page_is_accessed(bpage); freed = buf_LRU_free_block(bpage, TRUE, NULL, have_LRU_mutex); mutex_exit(block_mutex); switch (freed) { case BUF_LRU_FREED: + /* Keep track of pages that are evicted without + ever being accessed. This gives us a measure of + the effectiveness of readahead */ + if (!accessed) { + ++buf_pool->stat.n_ra_pages_evicted; + } return(TRUE); case BUF_LRU_NOT_FREED: @@ -1027,8 +1018,10 @@ buf_LRU_old_adjust_len(void) ut_a(buf_pool->LRU_old); //ut_ad(buf_pool_mutex_own()); ut_ad(mutex_own(&LRU_list_mutex)); -#if 3 * (BUF_LRU_OLD_MIN_LEN / 8) <= BUF_LRU_OLD_TOLERANCE + 5 -# error "3 * (BUF_LRU_OLD_MIN_LEN / 8) <= BUF_LRU_OLD_TOLERANCE + 5" + ut_ad(buf_LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN); + ut_ad(buf_LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX); +#if BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5) +# error "BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)" #endif #ifdef UNIV_LRU_DEBUG /* buf_pool->LRU_old must be the first item in the LRU list @@ -1040,34 +1033,39 @@ buf_LRU_old_adjust_len(void) || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old); #endif /* UNIV_LRU_DEBUG */ + old_len = buf_pool->LRU_old_len; + new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU) + * buf_LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV, + UT_LIST_GET_LEN(buf_pool->LRU) + - (BUF_LRU_OLD_TOLERANCE + + BUF_LRU_NON_OLD_MIN_LEN)); + for (;;) { - old_len = buf_pool->LRU_old_len; - new_len = 3 * (UT_LIST_GET_LEN(buf_pool->LRU) / 8); + buf_page_t* LRU_old = buf_pool->LRU_old; - ut_ad(buf_pool->LRU_old->in_LRU_list); - ut_a(buf_pool->LRU_old); + ut_a(LRU_old); + ut_ad(LRU_old->in_LRU_list); #ifdef UNIV_LRU_DEBUG - ut_a(buf_pool->LRU_old->old); + ut_a(LRU_old->old); #endif /* UNIV_LRU_DEBUG */ /* Update the LRU_old pointer if necessary */ - if (old_len < new_len - BUF_LRU_OLD_TOLERANCE) { + if (old_len + BUF_LRU_OLD_TOLERANCE < new_len) { - buf_pool->LRU_old = UT_LIST_GET_PREV( - LRU, buf_pool->LRU_old); + buf_pool->LRU_old = LRU_old = UT_LIST_GET_PREV( + LRU, LRU_old); #ifdef UNIV_LRU_DEBUG - ut_a(!buf_pool->LRU_old->old); + ut_a(!LRU_old->old); #endif /* UNIV_LRU_DEBUG */ - buf_page_set_old(buf_pool->LRU_old, TRUE); - buf_pool->LRU_old_len++; + old_len = 
++buf_pool->LRU_old_len; + buf_page_set_old(LRU_old, TRUE); } else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) { - buf_page_set_old(buf_pool->LRU_old, FALSE); - buf_pool->LRU_old = UT_LIST_GET_NEXT( - LRU, buf_pool->LRU_old); - buf_pool->LRU_old_len--; + buf_pool->LRU_old = UT_LIST_GET_NEXT(LRU, LRU_old); + old_len = --buf_pool->LRU_old_len; + buf_page_set_old(LRU_old, FALSE); } else { return; } @@ -1092,12 +1090,13 @@ buf_LRU_old_init(void) the adjust function to move the LRU_old pointer to the right position */ - bpage = UT_LIST_GET_FIRST(buf_pool->LRU); - - while (bpage != NULL) { + for (bpage = UT_LIST_GET_LAST(buf_pool->LRU); bpage != NULL; + bpage = UT_LIST_GET_PREV(LRU, bpage)) { ut_ad(bpage->in_LRU_list); - buf_page_set_old(bpage, TRUE); - bpage = UT_LIST_GET_NEXT(LRU, bpage); + ut_ad(buf_page_in_file(bpage)); + /* This loop temporarily violates the + assertions of buf_page_set_old(). */ + bpage->old = TRUE; } buf_pool->LRU_old = UT_LIST_GET_FIRST(buf_pool->LRU); @@ -1152,16 +1151,19 @@ buf_LRU_remove_block( if (UNIV_UNLIKELY(bpage == buf_pool->LRU_old)) { - /* Below: the previous block is guaranteed to exist, because - the LRU_old pointer is only allowed to differ by the - tolerance value from strict 3/8 of the LRU list length. */ + /* Below: the previous block is guaranteed to exist, + because the LRU_old pointer is only allowed to differ + by BUF_LRU_OLD_TOLERANCE from strict + buf_LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU + list length. */ + buf_page_t* prev_bpage = UT_LIST_GET_PREV(LRU, bpage); - buf_pool->LRU_old = UT_LIST_GET_PREV(LRU, bpage); - ut_a(buf_pool->LRU_old); + ut_a(prev_bpage); #ifdef UNIV_LRU_DEBUG - ut_a(!buf_pool->LRU_old->old); + ut_a(!prev_bpage->old); #endif /* UNIV_LRU_DEBUG */ - buf_page_set_old(buf_pool->LRU_old, TRUE); + buf_pool->LRU_old = prev_bpage; + buf_page_set_old(prev_bpage, TRUE); buf_pool->LRU_old_len++; } @@ -1172,10 +1174,19 @@ buf_LRU_remove_block( buf_unzip_LRU_remove_block_if_needed(bpage); - /* If the LRU list is so short that LRU_old not defined, return */ + /* If the LRU list is so short that LRU_old is not defined, + clear the "old" flags and return */ if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) { + for (bpage = UT_LIST_GET_FIRST(buf_pool->LRU); bpage != NULL; + bpage = UT_LIST_GET_NEXT(LRU, bpage)) { + /* This loop temporarily violates the + assertions of buf_page_set_old(). 
*/ + bpage->old = FALSE; + } + buf_pool->LRU_old = NULL; + buf_pool->LRU_old_len = 0; return; } @@ -1227,8 +1238,6 @@ buf_LRU_add_block_to_end_low( /*=========================*/ buf_page_t* bpage) /*!< in: control block */ { - buf_page_t* last_bpage; - ut_ad(buf_pool); ut_ad(bpage); //ut_ad(buf_pool_mutex_own()); @@ -1236,31 +1245,18 @@ buf_LRU_add_block_to_end_low( ut_a(buf_page_in_file(bpage)); - last_bpage = UT_LIST_GET_LAST(buf_pool->LRU); - - if (last_bpage) { - bpage->LRU_position = last_bpage->LRU_position; - } else { - bpage->LRU_position = buf_pool_clock_tic(); - } - ut_ad(!bpage->in_LRU_list); UT_LIST_ADD_LAST(LRU, buf_pool->LRU, bpage); bpage->in_LRU_list = TRUE; - buf_page_set_old(bpage, TRUE); - - if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) { - - buf_pool->LRU_old_len++; - } - if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) { ut_ad(buf_pool->LRU_old); /* Adjust the length of the old block list if necessary */ + buf_page_set_old(bpage, TRUE); + buf_pool->LRU_old_len++; buf_LRU_old_adjust_len(); } else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) { @@ -1269,6 +1265,8 @@ buf_LRU_add_block_to_end_low( defined: init it */ buf_LRU_old_init(); + } else { + buf_page_set_old(bpage, buf_pool->LRU_old != NULL); } /* If this is a zipped block with decompressed frame as well @@ -1302,7 +1300,6 @@ buf_LRU_add_block_low( UT_LIST_ADD_FIRST(LRU, buf_pool->LRU, bpage); - bpage->LRU_position = buf_pool_clock_tic(); bpage->freed_page_clock = buf_pool->freed_page_clock; } else { #ifdef UNIV_LRU_DEBUG @@ -1317,23 +1314,17 @@ buf_LRU_add_block_low( UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, buf_pool->LRU_old, bpage); buf_pool->LRU_old_len++; - - /* We copy the LRU position field of the previous block - to the new block */ - - bpage->LRU_position = (buf_pool->LRU_old)->LRU_position; } bpage->in_LRU_list = TRUE; - buf_page_set_old(bpage, old); - if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) { ut_ad(buf_pool->LRU_old); /* Adjust the length of the old block list if necessary */ + buf_page_set_old(bpage, old); buf_LRU_old_adjust_len(); } else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) { @@ -1342,6 +1333,8 @@ buf_LRU_add_block_low( defined: init it */ buf_LRU_old_init(); + } else { + buf_page_set_old(bpage, buf_pool->LRU_old != NULL); } /* If this is a zipped block with decompressed frame as well @@ -1375,6 +1368,13 @@ buf_LRU_make_block_young( /*=====================*/ buf_page_t* bpage) /*!< in: control block */ { + //ut_ad(buf_pool_mutex_own()); + ut_ad(mutex_own(&LRU_list_mutex)); + + if (bpage->old) { + buf_pool->stat.n_pages_made_young++; + } + buf_LRU_remove_block(bpage); buf_LRU_add_block_low(bpage, FALSE); } @@ -1571,15 +1571,6 @@ not_freed: buf_pool->LRU_old = b; } -#ifdef UNIV_LRU_DEBUG - ut_a(prev_b->old - || !UT_LIST_GET_NEXT(LRU, b) - || UT_LIST_GET_NEXT(LRU, b)->old); - } else { - ut_a(!prev_b->old - || !UT_LIST_GET_NEXT(LRU, b) - || !UT_LIST_GET_NEXT(LRU, b)->old); -#endif /* UNIV_LRU_DEBUG */ } lru_len = UT_LIST_GET_LEN(buf_pool->LRU); @@ -1595,6 +1586,11 @@ not_freed: defined: init it */ buf_LRU_old_init(); } +#ifdef UNIV_LRU_DEBUG + /* Check that the "old" flag is consistent + in the block and its neighbours. 
*/ + buf_page_set_old(b, buf_page_is_old(b)); +#endif /* UNIV_LRU_DEBUG */ } else { b->in_LRU_list = FALSE; buf_LRU_add_block_low(b, buf_page_is_old(b)); @@ -1985,6 +1981,52 @@ buf_LRU_block_free_hashed_page( buf_LRU_block_free_non_file_page(block, have_page_hash_mutex); } +/**********************************************************************//** +Updates buf_LRU_old_ratio. +@return updated old_pct */ +UNIV_INTERN +uint +buf_LRU_old_ratio_update( +/*=====================*/ + uint old_pct,/*!< in: Reserve this percentage of + the buffer pool for "old" blocks. */ + ibool adjust) /*!< in: TRUE=adjust the LRU list; + FALSE=just assign buf_LRU_old_ratio + during the initialization of InnoDB */ +{ + uint ratio; + + ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100; + if (ratio < BUF_LRU_OLD_RATIO_MIN) { + ratio = BUF_LRU_OLD_RATIO_MIN; + } else if (ratio > BUF_LRU_OLD_RATIO_MAX) { + ratio = BUF_LRU_OLD_RATIO_MAX; + } + + if (adjust) { + //buf_pool_mutex_enter(); + mutex_enter(&LRU_list_mutex); + + if (ratio != buf_LRU_old_ratio) { + buf_LRU_old_ratio = ratio; + + if (UT_LIST_GET_LEN(buf_pool->LRU) + >= BUF_LRU_OLD_MIN_LEN) { + buf_LRU_old_adjust_len(); + } + } + + //buf_pool_mutex_exit(); + mutex_exit(&LRU_list_mutex); + } else { + buf_LRU_old_ratio = ratio; + } + + /* the reverse of + ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100 */ + return((uint) (ratio * 100 / (double) BUF_LRU_OLD_RATIO_DIV + 0.5)); +} + /********************************************************************//** Update the historical stats that we are collecting for LRU eviction policy at the end of each interval. */ @@ -2023,6 +2065,218 @@ func_exit: memset(&buf_LRU_stat_cur, 0, sizeof buf_LRU_stat_cur); } +/********************************************************************//** +Dump the LRU page list to the specific file. */ +#define LRU_DUMP_FILE "ib_lru_dump" + +UNIV_INTERN +ibool +buf_LRU_file_dump(void) +/*===================*/ +{ + os_file_t dump_file = -1; + ibool success; + byte* buffer_base = NULL; + byte* buffer = NULL; + buf_page_t* bpage; + ulint buffers; + ulint offset; + ibool ret = FALSE; + ulint i; + + for (i = 0; i < srv_n_data_files; i++) { + if (strstr(srv_data_file_names[i], LRU_DUMP_FILE) != NULL) { + fprintf(stderr, + " InnoDB: The name '%s' seems to be used for" + " innodb_data_file_path. 
Dumping LRU list is not" + " done for safeness.\n", LRU_DUMP_FILE); + goto end; + } + } + + buffer_base = ut_malloc(2 * UNIV_PAGE_SIZE); + buffer = ut_align(buffer_base, UNIV_PAGE_SIZE); + if (!buffer) { + fprintf(stderr, + " InnoDB: cannot allocate buffer.\n"); + goto end; + } + + dump_file = os_file_create(LRU_DUMP_FILE, OS_FILE_OVERWRITE, + OS_FILE_NORMAL, OS_DATA_FILE, &success); + if (!success) { + os_file_get_last_error(TRUE); + fprintf(stderr, + " InnoDB: cannot open %s\n", LRU_DUMP_FILE); + goto end; + } + + mutex_enter(&LRU_list_mutex); + bpage = UT_LIST_GET_LAST(buf_pool->LRU); + + buffers = offset = 0; + while (bpage != NULL) { + if (offset == 0) { + memset(buffer, 0, UNIV_PAGE_SIZE); + } + + mach_write_to_4(buffer + offset * 4, bpage->space); + offset++; + mach_write_to_4(buffer + offset * 4, bpage->offset); + offset++; + + if (offset == UNIV_PAGE_SIZE/4) { + success = os_file_write(LRU_DUMP_FILE, dump_file, buffer, + (buffers << UNIV_PAGE_SIZE_SHIFT) & 0xFFFFFFFFUL, + (buffers >> (32 - UNIV_PAGE_SIZE_SHIFT)), + UNIV_PAGE_SIZE); + if (!success) { + mutex_exit(&LRU_list_mutex); + fprintf(stderr, + " InnoDB: cannot write page %lu of %s\n", + buffers, LRU_DUMP_FILE); + goto end; + } + buffers++; + offset = 0; + } + + bpage = UT_LIST_GET_PREV(LRU, bpage); + } + mutex_exit(&LRU_list_mutex); + + if (offset == 0) { + memset(buffer, 0, UNIV_PAGE_SIZE); + } + + mach_write_to_4(buffer + offset * 4, 0xFFFFFFFFUL); + offset++; + mach_write_to_4(buffer + offset * 4, 0xFFFFFFFFUL); + offset++; + + success = os_file_write(LRU_DUMP_FILE, dump_file, buffer, + (buffers << UNIV_PAGE_SIZE_SHIFT) & 0xFFFFFFFFUL, + (buffers >> (32 - UNIV_PAGE_SIZE_SHIFT)), + UNIV_PAGE_SIZE); + if (!success) { + goto end; + } + + ret = TRUE; +end: + if (dump_file != -1) + os_file_close(dump_file); + if (buffer_base) + ut_free(buffer_base); + + return(ret); +} +/********************************************************************//** +Read the pages based on the specific file.*/ +UNIV_INTERN +ibool +buf_LRU_file_restore(void) +/*======================*/ +{ + os_file_t dump_file = -1; + ibool success; + byte* buffer_base = NULL; + byte* buffer = NULL; + ulint buffers; + ulint offset; + ulint reads = 0; + ulint req = 0; + ibool terminated = FALSE; + ibool ret = FALSE; + + buffer_base = ut_malloc(2 * UNIV_PAGE_SIZE); + buffer = ut_align(buffer_base, UNIV_PAGE_SIZE); + if (!buffer) { + fprintf(stderr, + " InnoDB: cannot allocate buffer.\n"); + goto end; + } + + dump_file = os_file_create_simple_no_error_handling( + LRU_DUMP_FILE, OS_FILE_OPEN, OS_FILE_READ_ONLY, &success); + if (!success) { + os_file_get_last_error(TRUE); + fprintf(stderr, + " InnoDB: cannot open %s\n", LRU_DUMP_FILE); + goto end; + } + + buffers = 0; + while (!terminated) { + success = os_file_read(dump_file, buffer, + (buffers << UNIV_PAGE_SIZE_SHIFT) & 0xFFFFFFFFUL, + (buffers >> (32 - UNIV_PAGE_SIZE_SHIFT)), + UNIV_PAGE_SIZE); + if (!success) { + fprintf(stderr, + " InnoDB: cannot read page %lu of %s," + " or meet unexpected terminal.", + buffers, LRU_DUMP_FILE); + goto end; + } + + for (offset = 0; offset < UNIV_PAGE_SIZE/4; offset += 2) { + ulint space_id, zip_size, page_no; + ulint err; + ib_int64_t tablespace_version; + + space_id = mach_read_from_4(buffer + offset * 4); + page_no = mach_read_from_4(buffer + (offset + 1) * 4); + if (space_id == 0xFFFFFFFFUL + || page_no == 0xFFFFFFFFUL) { + terminated = TRUE; + break; + } + + if (offset % 16 == 15) { + os_aio_simulated_wake_handler_threads(); + buf_flush_free_margin(FALSE); + } + + zip_size = 
fil_space_get_zip_size(space_id); + if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) { + continue; + } + + if (fil_area_is_exist(space_id, zip_size, page_no, 0, + zip_size ? zip_size : UNIV_PAGE_SIZE)) { + + tablespace_version = fil_space_get_version(space_id); + + req++; + reads += buf_read_page_low(&err, FALSE, BUF_READ_ANY_PAGE + | OS_AIO_SIMULATED_WAKE_LATER, + space_id, zip_size, TRUE, + tablespace_version, page_no, NULL); + buf_LRU_stat_inc_io(); + } + } + + buffers++; + } + + os_aio_simulated_wake_handler_threads(); + buf_flush_free_margin(FALSE); + + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: reading pages based on the dumped LRU list was done." + " (requested: %lu, read: %lu)\n", req, reads); + ret = TRUE; +end: + if (dump_file != -1) + os_file_close(dump_file); + if (buffer_base) + ut_free(buffer_base); + + return(ret); +} + #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /**********************************************************************//** Validates the LRU list. @@ -2036,7 +2290,6 @@ buf_LRU_validate(void) buf_block_t* block; ulint old_len; ulint new_len; - ulint LRU_pos; ut_ad(buf_pool); //buf_pool_mutex_enter(); @@ -2046,7 +2299,11 @@ buf_LRU_validate(void) ut_a(buf_pool->LRU_old); old_len = buf_pool->LRU_old_len; - new_len = 3 * (UT_LIST_GET_LEN(buf_pool->LRU) / 8); + new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU) + * buf_LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV, + UT_LIST_GET_LEN(buf_pool->LRU) + - (BUF_LRU_OLD_TOLERANCE + + BUF_LRU_NON_OLD_MIN_LEN)); ut_a(old_len >= new_len - BUF_LRU_OLD_TOLERANCE); ut_a(old_len <= new_len + BUF_LRU_OLD_TOLERANCE); } @@ -2077,28 +2334,24 @@ buf_LRU_validate(void) } if (buf_page_is_old(bpage)) { - old_len++; - } + const buf_page_t* prev + = UT_LIST_GET_PREV(LRU, bpage); + const buf_page_t* next + = UT_LIST_GET_NEXT(LRU, bpage); - if (buf_pool->LRU_old && (old_len == 1)) { - ut_a(buf_pool->LRU_old == bpage); - } + if (!old_len++) { + ut_a(buf_pool->LRU_old == bpage); + } else { + ut_a(!prev || buf_page_is_old(prev)); + } - LRU_pos = buf_page_get_LRU_position(bpage); + ut_a(!next || buf_page_is_old(next)); + } bpage = UT_LIST_GET_NEXT(LRU, bpage); - - if (bpage) { - /* If the following assert fails, it may - not be an error: just the buf_pool clock - has wrapped around */ - ut_a(LRU_pos >= buf_page_get_LRU_position(bpage)); - } } - if (buf_pool->LRU_old) { - ut_a(buf_pool->LRU_old_len == old_len); - } + ut_a(buf_pool->LRU_old_len == old_len); mutex_exit(&LRU_list_mutex); mutex_enter(&free_list_mutex); @@ -2149,9 +2402,6 @@ buf_LRU_print(void) //buf_pool_mutex_enter(); mutex_enter(&LRU_list_mutex); - fprintf(stderr, "Pool ulint clock %lu\n", - (ulong) buf_pool->ulint_clock); - bpage = UT_LIST_GET_FIRST(buf_pool->LRU); while (bpage != NULL) { @@ -2182,18 +2432,16 @@ buf_LRU_print(void) const byte* frame; case BUF_BLOCK_FILE_PAGE: frame = buf_block_get_frame((buf_block_t*) bpage); - fprintf(stderr, "\nLRU pos %lu type %lu" + fprintf(stderr, "\ntype %lu" " index id %lu\n", - (ulong) buf_page_get_LRU_position(bpage), (ulong) fil_page_get_type(frame), (ulong) ut_dulint_get_low( btr_page_get_index_id(frame))); break; case BUF_BLOCK_ZIP_PAGE: frame = bpage->zip.data; - fprintf(stderr, "\nLRU pos %lu type %lu size %lu" + fprintf(stderr, "\ntype %lu size %lu" " index id %lu\n", - (ulong) buf_page_get_LRU_position(bpage), (ulong) fil_page_get_type(frame), (ulong) buf_page_get_zip_size(bpage), (ulong) ut_dulint_get_low( @@ -2201,8 +2449,7 @@ buf_LRU_print(void) break; default: - fprintf(stderr, "\nLRU pos %lu !state %lu!\n", - (ulong) 
buf_page_get_LRU_position(bpage), + fprintf(stderr, "\n!state %lu!\n", (ulong) buf_page_get_state(bpage)); break; } |
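
A note on the sizing logic introduced above: buf_LRU_old_adjust_len() now derives the target length of the "old" sublist as buf_LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the whole LRU list, capped so that at least BUF_LRU_OLD_TOLERANCE + BUF_LRU_NON_OLD_MIN_LEN blocks remain "new", and it only moves the LRU_old pointer once the actual old-list length drifts more than BUF_LRU_OLD_TOLERANCE blocks away from that target. The standalone sketch below reproduces only that arithmetic; the value of BUF_LRU_OLD_RATIO_DIV is an assumption (the real constant lives in buf0lru.h, outside this patch) and the helper names are invented for the example.

#include <stdio.h>

/* BUF_LRU_OLD_RATIO_DIV is assumed here; the tolerance and the
non-old minimum are taken from the patch itself. */
#define BUF_LRU_OLD_RATIO_DIV	1024	/* assumed divisor */
#define BUF_LRU_OLD_TOLERANCE	20	/* from the patch */
#define BUF_LRU_NON_OLD_MIN_LEN	5	/* from the patch */

static unsigned long ut_min_ul(unsigned long a, unsigned long b)
{
	return(a < b ? a : b);
}

/* Target length of the "old" sublist for a given LRU length and ratio,
mirroring the new_len computation in buf_LRU_old_adjust_len(). */
static unsigned long
old_target_len(unsigned long lru_len, unsigned int old_ratio)
{
	return(ut_min_ul(lru_len * old_ratio / BUF_LRU_OLD_RATIO_DIV,
			 lru_len - (BUF_LRU_OLD_TOLERANCE
				    + BUF_LRU_NON_OLD_MIN_LEN)));
}

int main(void)
{
	/* With a 10000-block LRU list and a ratio equivalent to the
	pre-patch hard-coded 3/8 split, the target is 3750 blocks; the
	LRU_old pointer is only moved once the actual old-list length
	is more than BUF_LRU_OLD_TOLERANCE blocks away from it. */
	unsigned long	lru_len = 10000;
	unsigned int	ratio = 3 * BUF_LRU_OLD_RATIO_DIV / 8;
	unsigned long	target = old_target_len(lru_len, ratio);

	printf("target old length: %lu\n", target);
	printf("adjust if old_len < %lu or > %lu\n",
	       target - BUF_LRU_OLD_TOLERANCE,
	       target + BUF_LRU_OLD_TOLERANCE);
	return(0);
}

Compared with the old hard-coded 3/8 split, expressing the boundary as a ratio over BUF_LRU_OLD_RATIO_DIV is what lets the new index-scan heuristics tune how much of the pool "old" blocks may occupy before being promoted.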
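buf_LRU_old_ratio_update() maps a caller-supplied percentage into that fixed-point ratio (clamped to BUF_LRU_OLD_RATIO_MIN..BUF_LRU_OLD_RATIO_MAX) and returns the percentage the clamped ratio actually represents, using the rounded reverse conversion in its final return statement. A minimal worked example of the round trip, again under the assumption that BUF_LRU_OLD_RATIO_DIV is 1024 (the constant is defined outside this patch):

#include <stdio.h>

/* Assumed value; the real constant is defined in buf0lru.h. */
#define BUF_LRU_OLD_RATIO_DIV 1024

int main(void)
{
	unsigned int old_pct = 37;	/* roughly the traditional 3/8 split */

	/* forward conversion, as in buf_LRU_old_ratio_update() */
	unsigned int ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100;

	/* reverse conversion with rounding, as in its return statement */
	unsigned int back = (unsigned int)
		(ratio * 100 / (double) BUF_LRU_OLD_RATIO_DIV + 0.5);

	/* 37 * 1024 / 100 = 378; 378 * 100 / 1024.0 + 0.5 = 37.4 -> 37 */
	printf("pct %u -> ratio %u -> pct %u\n", old_pct, ratio, back);
	return(0);
}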
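buf_LRU_file_dump() writes the LRU list tail-first as (space_id, page_no) pairs, each stored big-endian by mach_write_to_4(), packed into UNIV_PAGE_SIZE-sized blocks and terminated by a 0xFFFFFFFF/0xFFFFFFFF pair; the 64-bit file offset is handed to os_file_write() as a low half, (buffers << UNIV_PAGE_SIZE_SHIFT) & 0xFFFFFFFF, and a high half, buffers >> (32 - UNIV_PAGE_SIZE_SHIFT). The reader below is an offline decoding sketch based solely on that inferred layout; it is not part of the server code, and it assumes the default 16 KiB UNIV_PAGE_SIZE.

#include <stdio.h>
#include <stdint.h>

#define UNIV_PAGE_SIZE 16384	/* assumed default page size */

/* Read a 4-byte big-endian value, as written by mach_write_to_4(). */
static uint32_t read_be4(const unsigned char* b)
{
	return((uint32_t) b[0] << 24 | (uint32_t) b[1] << 16
	       | (uint32_t) b[2] << 8 | (uint32_t) b[3]);
}

int main(int argc, char** argv)
{
	unsigned char	buf[UNIV_PAGE_SIZE];
	FILE*		f;

	if (argc != 2 || !(f = fopen(argv[1], "rb"))) {
		fprintf(stderr, "usage: %s ib_lru_dump\n", argv[0]);
		return(1);
	}

	/* Each UNIV_PAGE_SIZE block holds UNIV_PAGE_SIZE / 8 pairs of
	(space_id, page_no); a pair of 0xFFFFFFFF values ends the list. */
	while (fread(buf, 1, UNIV_PAGE_SIZE, f) == UNIV_PAGE_SIZE) {
		size_t off;

		for (off = 0; off + 8 <= UNIV_PAGE_SIZE; off += 8) {
			uint32_t space = read_be4(buf + off);
			uint32_t page = read_be4(buf + off + 4);

			if (space == 0xFFFFFFFFUL || page == 0xFFFFFFFFUL) {
				fclose(f);
				return(0);
			}

			printf("space %lu page %lu\n",
			       (unsigned long) space, (unsigned long) page);
		}
	}

	fclose(f);
	return(0);
}

Compiled with any C compiler and pointed at an ib_lru_dump file from the data directory, this lists the pages that buf_LRU_file_restore() would request, in the order they were dumped.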