Diffstat (limited to 'storage/innobase/buf/buf0buddy.cc')
-rw-r--r-- | storage/innobase/buf/buf0buddy.cc | 446
1 file changed, 304 insertions, 142 deletions
diff --git a/storage/innobase/buf/buf0buddy.cc b/storage/innobase/buf/buf0buddy.cc
index e34216dbc8f..ee400fcdf23 100644
--- a/storage/innobase/buf/buf0buddy.cc
+++ b/storage/innobase/buf/buf0buddy.cc
@@ -1,6 +1,6 @@
 /*****************************************************************************
 
-Copyright (c) 2006, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2006, 2013, Oracle and/or its affiliates. All Rights Reserved.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -33,12 +33,128 @@ Created December 2006 by Marko Makela
 #include "buf0lru.h"
 #include "buf0flu.h"
 #include "page0zip.h"
+#include "srv0start.h"
+
+/** When freeing a buf we attempt to coalesce by looking at its buddy
+and deciding whether it is free or not. To ascertain if the buddy is
+free we look for BUF_BUDDY_STAMP_FREE at BUF_BUDDY_STAMP_OFFSET
+within the buddy. The question is how we can be sure that it is
+safe to look at BUF_BUDDY_STAMP_OFFSET.
+The answer lies in following invariants:
+* All blocks allocated by buddy allocator are used for compressed
+page frame.
+* A compressed table always have space_id < SRV_LOG_SPACE_FIRST_ID
+* BUF_BUDDY_STAMP_OFFSET always points to the space_id field in
+a frame.
+  -- The above is true because we look at these fields when the
+     corresponding buddy block is free which implies that:
+     * The block we are looking at must have an address aligned at
+       the same size that its free buddy has. For example, if we have
+       a free block of 8K then its buddy's address must be aligned at
+       8K as well.
+     * It is possible that the block we are looking at may have been
+       further divided into smaller sized blocks but its starting
+       address must still remain the start of a page frame i.e.: it
+       cannot be middle of a block. For example, if we have a free
+       block of size 8K then its buddy may be divided into blocks
+       of, say, 1K, 1K, 2K, 4K but the buddy's address will still be
+       the starting address of first 1K compressed page.
+     * What is important to note is that for any given block, the
+       buddy's address cannot be in the middle of a larger block i.e.:
+       in above example, our 8K block cannot have a buddy whose address
+       is aligned on 8K but it is part of a larger 16K block.
+*/
+
+/** Offset within buf_buddy_free_t where free or non_free stamps
+are written.*/
+#define BUF_BUDDY_STAMP_OFFSET	FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID
+
+/** Value that we stamp on all buffers that are currently on the zip_free
+list. This value is stamped at BUF_BUDDY_STAMP_OFFSET offset */
+#define BUF_BUDDY_STAMP_FREE	(SRV_LOG_SPACE_FIRST_ID)
+
+/** Stamp value for non-free buffers. Will be overwritten by a non-zero
+value by the consumer of the block */
+#define BUF_BUDDY_STAMP_NONFREE	(0XFFFFFFFF)
+
+#if BUF_BUDDY_STAMP_FREE >= BUF_BUDDY_STAMP_NONFREE
+# error "BUF_BUDDY_STAMP_FREE >= BUF_BUDDY_STAMP_NONFREE"
+#endif
+
+/** Return type of buf_buddy_is_free() */
+enum buf_buddy_state_t {
+	BUF_BUDDY_STATE_FREE,	/*!< If the buddy to completely free */
+	BUF_BUDDY_STATE_USED,	/*!< Buddy currently in used */
+	BUF_BUDDY_STATE_PARTIALLY_USED/*!< Some sub-blocks in the buddy
+					are in use */
+};
+
+#ifdef UNIV_DEBUG_VALGRIND
+/**********************************************************************//**
+Invalidate memory area that we won't access while page is free */
+UNIV_INLINE
+void
+buf_buddy_mem_invalid(
+/*==================*/
+	buf_buddy_free_t*	buf,	/*!< in: block to check */
+	ulint			i)	/*!< in: index of zip_free[] */
+{
+	const size_t	size	= BUF_BUDDY_LOW << i;
+	ut_ad(i <= BUF_BUDDY_SIZES);
+
+	UNIV_MEM_ASSERT_W(buf, size);
+	UNIV_MEM_INVALID(buf, size);
+}
+#else /* UNIV_DEBUG_VALGRIND */
+# define buf_buddy_mem_invalid(buf, i) ut_ad((i) <= BUF_BUDDY_SIZES)
+#endif /* UNIV_DEBUG_VALGRIND */
+
+/**********************************************************************//**
+Check if a buddy is stamped free.
+@return whether the buddy is free */
+UNIV_INLINE __attribute__((warn_unused_result))
+bool
+buf_buddy_stamp_is_free(
+/*====================*/
+	const buf_buddy_free_t*	buf)	/*!< in: block to check */
+{
+	return(mach_read_from_4(buf->stamp.bytes + BUF_BUDDY_STAMP_OFFSET)
+	       == BUF_BUDDY_STAMP_FREE);
+}
+
+/**********************************************************************//**
+Stamps a buddy free. */
+UNIV_INLINE
+void
+buf_buddy_stamp_free(
+/*=================*/
+	buf_buddy_free_t*	buf,	/*!< in/out: block to stamp */
+	ulint			i)	/*!< in: block size */
+{
+	ut_d(memset(buf, i, BUF_BUDDY_LOW << i));
+	buf_buddy_mem_invalid(buf, i);
+	mach_write_to_4(buf->stamp.bytes + BUF_BUDDY_STAMP_OFFSET,
+			BUF_BUDDY_STAMP_FREE);
+	buf->stamp.size = i;
+}
+
+/**********************************************************************//**
+Stamps a buddy nonfree.
+@param[in/out]	buf	block to stamp
+@param[in]	i	block size */
+#define buf_buddy_stamp_nonfree(buf, i) do {				\
+	buf_buddy_mem_invalid(buf, i);					\
+	memset(buf->stamp.bytes + BUF_BUDDY_STAMP_OFFSET, 0xff, 4);	\
+} while (0)
+#if BUF_BUDDY_STAMP_NONFREE != 0xffffffff
+# error "BUF_BUDDY_STAMP_NONFREE != 0xffffffff"
+#endif
 
 /**********************************************************************//**
 Get the offset of the buddy of a compressed page frame.
 @return the buddy relative of page */
 UNIV_INLINE
-byte*
+void*
 buf_buddy_get(
 /*==========*/
 	byte*	page,	/*!< in: compressed page */
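The invariants in the new comment block reduce to one arithmetic fact: a block of 2^k bytes handed out by the buddy system is aligned to 2^k, so its buddy differs from it in exactly one address bit, which is what buf_buddy_get() computes. A minimal standalone sketch of that arithmetic, assuming only a naturally aligned arena; it is not InnoDB code, and the names (arena, blk, buddy_of) are illustrative:

#include <cassert>
#include <cstddef>
#include <cstdint>

/* Return the buddy of 'blk', a block of 'size' bytes (a power of two)
inside an arena that is itself aligned to at least 'size'. */
static void*
buddy_of(void* arena, void* blk, std::size_t size)
{
	std::uintptr_t	offs = static_cast<char*>(blk)
		- static_cast<char*>(arena);

	/* A block of 'size' bytes is always aligned to 'size'... */
	assert((offs & (size - 1)) == 0);

	/* ...so toggling the bit equal to 'size' in the offset yields
	the other half of the enclosing block of 2 * size bytes. */
	return(static_cast<char*>(arena) + (offs ^ size));
}

buf_buddy_get() expresses the same toggle as an add or a subtract depending on whether the bit is set, which is equivalent to the XOR above.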
@@ -60,14 +176,96 @@ buf_buddy_get(
 
 /** Validate a given zip_free list. */
 struct	CheckZipFree {
-	void	operator()(const buf_page_t* elem) const
+	ulint	i;
+	CheckZipFree(ulint i) : i (i) {}
+
+	void	operator()(const buf_buddy_free_t* elem) const
 	{
-		ut_a(buf_page_get_state(elem) == BUF_BLOCK_ZIP_FREE);
+		ut_a(buf_buddy_stamp_is_free(elem));
+		ut_a(elem->stamp.size <= i);
 	}
 };
 
 #define BUF_BUDDY_LIST_VALIDATE(bp, i)				\
-	UT_LIST_VALIDATE(list, buf_page_t, bp->zip_free[i], CheckZipFree())
+	UT_LIST_VALIDATE(list, buf_buddy_free_t,		\
+			 bp->zip_free[i], CheckZipFree(i))
+
+#ifdef UNIV_DEBUG
+/**********************************************************************//**
+Debug function to validate that a buffer is indeed free i.e.: in the
+zip_free[].
+@return true if free */
+UNIV_INLINE
+bool
+buf_buddy_check_free(
+/*=================*/
+	buf_pool_t*		buf_pool,/*!< in: buffer pool instance */
+	const buf_buddy_free_t*	buf,	/*!< in: block to check */
+	ulint			i)	/*!< in: index of buf_pool->zip_free[] */
+{
+	const ulint	size	= BUF_BUDDY_LOW << i;
+
+	ut_ad(buf_pool_mutex_own(buf_pool));
+	ut_ad(!ut_align_offset(buf, size));
+	ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
+
+	buf_buddy_free_t* itr;
+
+	for (itr = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
+	     itr && itr != buf;
+	     itr = UT_LIST_GET_NEXT(list, itr)) {
+	}
+
+	return(itr == buf);
+}
+#endif /* UNIV_DEBUG */
+
+/**********************************************************************//**
+Checks if a buf is free i.e.: in the zip_free[].
+@retval BUF_BUDDY_STATE_FREE		if fully free
+@retval BUF_BUDDY_STATE_USED		if currently in use
+@retval BUF_BUDDY_STATE_PARTIALLY_USED	if partially in use. */
+static	__attribute__((warn_unused_result))
+buf_buddy_state_t
+buf_buddy_is_free(
+/*==============*/
+	buf_buddy_free_t*	buf,	/*!< in: block to check */
+	ulint			i)	/*!< in: index of
+					buf_pool->zip_free[] */
+{
+#ifdef UNIV_DEBUG
+	const ulint	size	= BUF_BUDDY_LOW << i;
+	ut_ad(!ut_align_offset(buf, size));
+	ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
+#endif /* UNIV_DEBUG */
+
+	/* We assume that all memory from buf_buddy_alloc()
+	is used for compressed page frames. */
+
+	/* We look inside the allocated objects returned by
+	buf_buddy_alloc() and assume that each block is a compressed
+	page that contains one of the following in space_id.
+	* BUF_BUDDY_STAMP_FREE if the block is in a zip_free list or
+	* BUF_BUDDY_STAMP_NONFREE if the block has been allocated but
+	not initialized yet or
+	* A valid space_id of a compressed tablespace
+
+	The call below attempts to read from free memory.  The memory
+	is "owned" by the buddy allocator (and it has been allocated
+	from the buffer pool), so there is nothing wrong about this. */
+	if (!buf_buddy_stamp_is_free(buf)) {
+		return(BUF_BUDDY_STATE_USED);
+	}
+
+	/* A block may be free but a fragment of it may still be in use.
+	To guard against that we write the free block size in terms of
+	zip_free index at start of stamped block. Note that we can
+	safely rely on this value only if the buf is free. */
+	ut_ad(buf->stamp.size <= i);
+	return(buf->stamp.size == i
+	       ? BUF_BUDDY_STATE_FREE
+	       : BUF_BUDDY_STATE_PARTIALLY_USED);
+}
 
 /**********************************************************************//**
 Add a block to the head of the appropriate buddy free list. */
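buf_buddy_is_free() classifies a buddy three ways: not stamped means in use, stamped with a matching size class means fully free, stamped with a smaller size class means only a fragment of it is free. A rough standalone sketch of that classification on a plain byte buffer, using a stand-in offset and sentinel (the real code uses BUF_BUDDY_STAMP_OFFSET, SRV_LOG_SPACE_FIRST_ID and the buf_buddy_free_t overlay, none of which are reproduced here):

#include <cstddef>
#include <cstdint>
#include <cstring>

enum block_state { STATE_FREE, STATE_USED, STATE_PARTIALLY_USED };

static const std::size_t	STAMP_OFFSET = 34;	 /* stand-in offset */
static const std::uint32_t	STAMP_FREE   = 0xFFFFFFF0;/* stand-in value */

/* Stamp a block of size class i as free and record i. */
static void
stamp_free(unsigned char* blk, unsigned i)
{
	std::memcpy(blk + STAMP_OFFSET, &STAMP_FREE, 4);
	blk[0] = static_cast<unsigned char>(i);	/* stand-in for stamp.size */
}

/* Classify a buddy the way buf_buddy_is_free() does: no sentinel means
used; a sentinel recorded at a smaller size class means only part of the
block is free. */
static block_state
classify(const unsigned char* blk, unsigned i)
{
	std::uint32_t	v;
	std::memcpy(&v, blk + STAMP_OFFSET, 4);

	if (v != STAMP_FREE) {
		return(STATE_USED);
	}

	return(blk[0] == i ? STATE_FREE : STATE_PARTIALLY_USED);
}

The size byte written at the start of the block plays the role of buf->stamp.size: it is only trusted after the sentinel check succeeds, exactly as in buf_buddy_is_free().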
@@ -75,15 +273,17 @@ UNIV_INLINE
 void
 buf_buddy_add_to_free(
 /*==================*/
-	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
-	buf_page_t*	bpage,		/*!< in,own: block to be freed */
-	ulint		i)		/*!< in: index of
-					buf_pool->zip_free[] */
+	buf_pool_t*		buf_pool,	/*!< in: buffer pool instance */
+	buf_buddy_free_t*	buf,		/*!< in,own: block to be freed */
+	ulint			i)		/*!< in: index of
+						buf_pool->zip_free[] */
 {
 	ut_ad(buf_pool_mutex_own(buf_pool));
-	ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE);
-	ut_ad(buf_pool->zip_free[i].start != bpage);
-	UT_LIST_ADD_FIRST(list, buf_pool->zip_free[i], bpage);
+	ut_ad(buf_pool->zip_free[i].start != buf);
+
+	buf_buddy_stamp_free(buf, i);
+	UT_LIST_ADD_FIRST(list, buf_pool->zip_free[i], buf);
+	ut_d(BUF_BUDDY_LIST_VALIDATE(buf_pool, i));
 }
 
 /**********************************************************************//**
@@ -92,35 +292,29 @@ UNIV_INLINE
 void
 buf_buddy_remove_from_free(
 /*=======================*/
-	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
-	buf_page_t*	bpage,		/*!< in: block to be removed */
-	ulint		i)		/*!< in: index of
-					buf_pool->zip_free[] */
+	buf_pool_t*		buf_pool,	/*!< in: buffer pool instance */
+	buf_buddy_free_t*	buf,		/*!< in,own: block to be freed */
+	ulint			i)		/*!< in: index of
+						buf_pool->zip_free[] */
 {
-#ifdef UNIV_DEBUG
-	buf_page_t*	prev = UT_LIST_GET_PREV(list, bpage);
-	buf_page_t*	next = UT_LIST_GET_NEXT(list, bpage);
-
-	ut_ad(!prev || buf_page_get_state(prev) == BUF_BLOCK_ZIP_FREE);
-	ut_ad(!next || buf_page_get_state(next) == BUF_BLOCK_ZIP_FREE);
-#endif /* UNIV_DEBUG */
-
 	ut_ad(buf_pool_mutex_own(buf_pool));
-	ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE);
-	UT_LIST_REMOVE(list, buf_pool->zip_free[i], bpage);
+	ut_ad(buf_buddy_check_free(buf_pool, buf, i));
+
+	UT_LIST_REMOVE(list, buf_pool->zip_free[i], buf);
+	buf_buddy_stamp_nonfree(buf, i);
 }
 
 /**********************************************************************//**
 Try to allocate a block from buf_pool->zip_free[].
 @return allocated block, or NULL if buf_pool->zip_free[] was empty */
 static
-void*
+buf_buddy_free_t*
 buf_buddy_alloc_zip(
 /*================*/
 	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
 	ulint		i)		/*!< in: index of buf_pool->zip_free[] */
 {
-	buf_page_t*	bpage;
+	buf_buddy_free_t*	buf;
 
 	ut_ad(buf_pool_mutex_own(buf_pool));
 	ut_a(i < BUF_BUDDY_SIZES);
@@ -128,33 +322,38 @@ buf_buddy_alloc_zip(
 
 	ut_d(BUF_BUDDY_LIST_VALIDATE(buf_pool, i));
 
-	bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
-
-	if (bpage) {
-		ut_a(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE);
+	buf = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
 
-		buf_buddy_remove_from_free(buf_pool, bpage, i);
+	if (buf) {
+		buf_buddy_remove_from_free(buf_pool, buf, i);
 	} else if (i + 1 < BUF_BUDDY_SIZES) {
 		/* Attempt to split. */
-		bpage = (buf_page_t*) buf_buddy_alloc_zip(buf_pool, i + 1);
+		buf = buf_buddy_alloc_zip(buf_pool, i + 1);
 
-		if (bpage) {
-			buf_page_t*	buddy = (buf_page_t*)
-				(((char*) bpage) + (BUF_BUDDY_LOW << i));
+		if (buf) {
+			buf_buddy_free_t*	buddy =
+				reinterpret_cast<buf_buddy_free_t*>(
+					buf->stamp.bytes
+					+ (BUF_BUDDY_LOW << i));
 
 			ut_ad(!buf_pool_contains_zip(buf_pool, buddy));
-			ut_d(memset(buddy, i, BUF_BUDDY_LOW << i));
-			buddy->state = BUF_BLOCK_ZIP_FREE;
 			buf_buddy_add_to_free(buf_pool, buddy, i);
 		}
 	}
 
-	if (bpage) {
-		ut_d(memset(bpage, ~i, BUF_BUDDY_LOW << i));
-		UNIV_MEM_ALLOC(bpage, BUF_BUDDY_SIZES << i);
+	if (buf) {
+		/* Trash the page other than the BUF_BUDDY_STAMP_NONFREE. */
+		UNIV_MEM_TRASH(buf, ~i, BUF_BUDDY_STAMP_OFFSET);
+		UNIV_MEM_TRASH(BUF_BUDDY_STAMP_OFFSET + 4
+			       + buf->stamp.bytes, ~i,
+			       (BUF_BUDDY_LOW << i)
+			       - (BUF_BUDDY_STAMP_OFFSET + 4));
+		ut_ad(mach_read_from_4(buf->stamp.bytes
+				       + BUF_BUDDY_STAMP_OFFSET)
+		      == BUF_BUDDY_STAMP_NONFREE);
 	}
 
-	return(bpage);
+	return(buf);
 }
 
 /**********************************************************************//**
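buf_buddy_alloc_zip() serves a request for size class i from the matching free list, or recursively splits a block of the next class and parks the unused half (the buddy) on list i. A compact sketch of that split step, assuming plain offset vectors as a stand-in for buf_pool->zip_free[] (illustrative only, not the InnoDB data structures):

#include <cstddef>
#include <vector>

/* One free list per power-of-two size class; list k holds offsets of
free blocks of (min_size << k) bytes. */
typedef std::vector<std::size_t> free_list;

/* Allocate a block of size class i, splitting a larger block when
class i is empty.  Returns the offset of the block, or (size_t) -1 if
nothing can be split. */
static std::size_t
alloc_from_class(std::vector<free_list>& zip_free,
		 std::size_t min_size, std::size_t i)
{
	if (!zip_free[i].empty()) {
		std::size_t offs = zip_free[i].back();
		zip_free[i].pop_back();
		return(offs);
	}

	if (i + 1 >= zip_free.size()) {
		return(static_cast<std::size_t>(-1));
	}

	/* Split a block of the next larger class: keep the first half,
	put the second half (the buddy) on free list i. */
	std::size_t offs = alloc_from_class(zip_free, min_size, i + 1);

	if (offs != static_cast<std::size_t>(-1)) {
		zip_free[i].push_back(offs + (min_size << i));
	}

	return(offs);
}

As in the patch, the caller receives the lower half and the upper half becomes a free block of the smaller class.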
@@ -246,18 +445,17 @@ buf_buddy_alloc_from(
 
 	/* Add the unused parts of the block to the free lists. */
 	while (j > i) {
-		buf_page_t*	bpage;
+		buf_buddy_free_t*	zip_buf;
 
 		offs >>= 1;
 		j--;
 
-		bpage = (buf_page_t*) ((byte*) buf + offs);
-		ut_d(memset(bpage, j, BUF_BUDDY_LOW << j));
-		bpage->state = BUF_BLOCK_ZIP_FREE;
-		ut_d(BUF_BUDDY_LIST_VALIDATE(buf_pool, i));
-		buf_buddy_add_to_free(buf_pool, bpage, j);
+		zip_buf = reinterpret_cast<buf_buddy_free_t*>(
+			reinterpret_cast<byte*>(buf) + offs);
+		buf_buddy_add_to_free(buf_pool, zip_buf, j);
 	}
 
+	buf_buddy_stamp_nonfree(reinterpret_cast<buf_buddy_free_t*>(buf), i);
 	return(buf);
 }
@@ -322,9 +520,9 @@ func_exit:
 
 /**********************************************************************//**
 Try to relocate a block.
-@return TRUE if relocated */
+@return true if relocated */
 static
-ibool
+bool
 buf_buddy_relocate(
 /*===============*/
 	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
@@ -337,7 +535,7 @@ buf_buddy_relocate(
 	const ulint	size	= BUF_BUDDY_LOW << i;
 	ib_mutex_t*	mutex;
 	ulint		space;
-	ulint		page_no;
+	ulint		offset;
 
 	ut_ad(buf_pool_mutex_own(buf_pool));
 	ut_ad(!mutex_own(&buf_pool->zip_mutex));
@@ -346,32 +544,19 @@
 	ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
 	UNIV_MEM_ASSERT_W(dst, size);
 
-	/* We assume that all memory from buf_buddy_alloc()
-	is used for compressed page frames. */
-
-	/* We look inside the allocated objects returned by
-	buf_buddy_alloc() and assume that each block is a compressed
-	page that contains a valid space_id and page_no in the page
-	header. Should the fields be invalid, we will be unable to
-	relocate the block. */
-
-	/* The src block may be split into smaller blocks,
-	some of which may be free.  Thus, the
-	mach_read_from_4() calls below may attempt to read
-	from free memory.  The memory is "owned" by the buddy
-	allocator (and it has been allocated from the buffer
-	pool), so there is nothing wrong about this.  The
-	mach_read_from_4() calls here will only trigger bogus
-	Valgrind memcheck warnings in UNIV_DEBUG_VALGRIND builds. */
 	space	= mach_read_from_4((const byte*) src
 				   + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
-	page_no	= mach_read_from_4((const byte*) src
+	offset	= mach_read_from_4((const byte*) src
 				   + FIL_PAGE_OFFSET);
+
 	/* Suppress Valgrind warnings about conditional jump
 	on uninitialized value. */
 	UNIV_MEM_VALID(&space, sizeof space);
-	UNIV_MEM_VALID(&page_no, sizeof page_no);
-	bpage = buf_page_hash_get(buf_pool, space, page_no);
+	UNIV_MEM_VALID(&offset, sizeof offset);
+
+	ut_ad(space != BUF_BUDDY_STAMP_FREE);
+
+	bpage = buf_page_hash_get(buf_pool, space, offset);
 
 	if (!bpage || bpage->zip.data != src) {
 
 		/* The block has probably been freshly
@@ -379,7 +564,7 @@
 		added to buf_pool->page_hash yet.  Obviously, it
 		cannot be relocated. */
 
-		return(FALSE);
+		return(false);
 	}
 
 	if (page_zip_get_size(&bpage->zip) != size) {
@@ -388,7 +573,7 @@
 		For the sake of simplicity, give up. */
 		ut_ad(page_zip_get_size(&bpage->zip) < size);
 
-		return(FALSE);
+		return(false);
 	}
 
 	/* The block must have been allocated, but it may
@@ -406,19 +591,17 @@ buf_buddy_relocate(
 
 		memcpy(dst, src, size);
 		bpage->zip.data = (page_zip_t*) dst;
 		mutex_exit(mutex);
-		UNIV_MEM_INVALID(src, size);
-		{
-			buf_buddy_stat_t*	buddy_stat
-				= &buf_pool->buddy_stat[i];
-			buddy_stat->relocated++;
-			buddy_stat->relocated_usec
-				+= ut_time_us(NULL) - usec;
-		}
-		return(TRUE);
+		buf_buddy_mem_invalid(
+			reinterpret_cast<buf_buddy_free_t*>(src), i);
+
+		buf_buddy_stat_t*	buddy_stat = &buf_pool->buddy_stat[i];
+		buddy_stat->relocated++;
+		buddy_stat->relocated_usec += ut_time_us(NULL) - usec;
+		return(true);
 	}
 
 	mutex_exit(mutex);
-	return(FALSE);
+	return(false);
 }
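buf_buddy_relocate() only moves a frame when the (space, offset) pair read from the frame itself still resolves, through the page hash, to a descriptor whose zip.data points at the source; anything else means the frame is stale or not yet published, and the function gives up. A simplified sketch of that check-then-copy step, assuming a std::map as a stand-in for buf_pool->page_hash and omitting the block mutex and I/O-fix handling of the real function:

#include <cstddef>
#include <cstring>
#include <map>
#include <utility>

/* Stand-in for buf_page_t: the only field that matters here is the
pointer to the compressed frame. */
struct descriptor {
	unsigned char*	zip_data;
};

typedef std::map<std::pair<unsigned long, unsigned long>, descriptor*>
	page_hash;

/* Try to move a compressed frame from src to dst.  Relocation is only
allowed if (space, offset) maps back to a descriptor that still points
at src; otherwise give up, as the patch does. */
static bool
relocate(page_hash& hash, unsigned long space, unsigned long offset,
	 unsigned char* src, unsigned char* dst, std::size_t size)
{
	page_hash::iterator it = hash.find(std::make_pair(space, offset));

	if (it == hash.end() || it->second->zip_data != src) {
		return(false);
	}

	std::memcpy(dst, src, size);
	it->second->zip_data = dst;	/* repoint the descriptor */
	return(true);
}

In the real code, space and offset are read from the frame at FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID and FIL_PAGE_OFFSET before the lookup; here they are passed in to keep the sketch short.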
@@ -433,8 +616,7 @@ buf_buddy_free_low(
 	ulint		i)		/*!< in: index of buf_pool->zip_free[],
 					or BUF_BUDDY_SIZES */
 {
-	buf_page_t*	bpage;
-	buf_page_t*	buddy;
+	buf_buddy_free_t*	buddy;
 
 	ut_ad(buf_pool_mutex_own(buf_pool));
 	ut_ad(!mutex_own(&buf_pool->zip_mutex));
@@ -445,7 +627,6 @@ buf_buddy_free_low(
 	buf_pool->buddy_stat[i].used--;
 recombine:
 	UNIV_MEM_ASSERT_AND_ALLOC(buf, BUF_BUDDY_LOW << i);
-	((buf_page_t*) buf)->state = BUF_BLOCK_ZIP_FREE;
 
 	if (i == BUF_BUDDY_SIZES) {
 		buf_buddy_block_free(buf_pool, buf);
@@ -464,73 +645,54 @@ recombine:
 	}
 
 	/* Try to combine adjacent blocks. */
-	buddy = (buf_page_t*) buf_buddy_get(((byte*) buf), BUF_BUDDY_LOW << i);
-
-#ifndef UNIV_DEBUG_VALGRIND
-	/* When Valgrind instrumentation is not enabled, we can read
-	buddy->state to quickly determine that a block is not free.
-	When the block is not free, buddy->state belongs to a compressed
-	page frame that may be flagged uninitialized in our Valgrind
-	instrumentation. */
-
-	if (buddy->state != BUF_BLOCK_ZIP_FREE) {
-
-		goto buddy_nonfree;
-	}
-#endif /* !UNIV_DEBUG_VALGRIND */
-
-	for (bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]); bpage; ) {
-		ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE);
-
-		if (bpage == buddy) {
-			/* The buddy is free: recombine */
-			buf_buddy_remove_from_free(buf_pool, bpage, i);
+	buddy = reinterpret_cast<buf_buddy_free_t*>(
+		buf_buddy_get(reinterpret_cast<byte*>(buf),
+			      BUF_BUDDY_LOW << i));
+
+	switch (buf_buddy_is_free(buddy, i)) {
+	case BUF_BUDDY_STATE_FREE:
+		/* The buddy is free: recombine */
+		buf_buddy_remove_from_free(buf_pool, buddy, i);
 buddy_is_free:
-			ut_ad(buf_page_get_state(buddy) == BUF_BLOCK_ZIP_FREE);
-			ut_ad(!buf_pool_contains_zip(buf_pool, buddy));
-			i++;
-			buf = ut_align_down(buf, BUF_BUDDY_LOW << i);
+		ut_ad(!buf_pool_contains_zip(buf_pool, buddy));
+		i++;
+		buf = ut_align_down(buf, BUF_BUDDY_LOW << i);
 
-			goto recombine;
-		}
+		goto recombine;
 
-		ut_a(bpage != buf);
-		UNIV_MEM_ASSERT_W(bpage, BUF_BUDDY_LOW << i);
-		bpage = UT_LIST_GET_NEXT(list, bpage);
-	}
-
-#ifndef UNIV_DEBUG_VALGRIND
-buddy_nonfree:
-#endif /* !UNIV_DEBUG_VALGRIND */
-
-	ut_d(BUF_BUDDY_LIST_VALIDATE(buf_pool, i));
+	case BUF_BUDDY_STATE_USED:
+		ut_d(BUF_BUDDY_LIST_VALIDATE(buf_pool, i));
 
-	/* The buddy is not free. Is there a free block of this size? */
-	bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
+		/* The buddy is not free. Is there a free block of
+		this size? */
+		if (buf_buddy_free_t* zip_buf =
+			UT_LIST_GET_FIRST(buf_pool->zip_free[i])) {
 
-	if (bpage) {
+			/* Remove the block from the free list, because
+			a successful buf_buddy_relocate() will overwrite
+			zip_free->list. */
+			buf_buddy_remove_from_free(buf_pool, zip_buf, i);
 
-		/* Remove the block from the free list, because a successful
-		buf_buddy_relocate() will overwrite bpage->list. */
-		buf_buddy_remove_from_free(buf_pool, bpage, i);
+			/* Try to relocate the buddy of buf to the free
+			block. */
+			if (buf_buddy_relocate(buf_pool, buddy, zip_buf, i)) {
 
-		/* Try to relocate the buddy of buf to the free block. */
-		if (buf_buddy_relocate(buf_pool, buddy, bpage, i)) {
+				goto buddy_is_free;
+			}
 
-			buddy->state = BUF_BLOCK_ZIP_FREE;
-			goto buddy_is_free;
+			buf_buddy_add_to_free(buf_pool, zip_buf, i);
 		}
 
-		buf_buddy_add_to_free(buf_pool, bpage, i);
+		break;
+	case BUF_BUDDY_STATE_PARTIALLY_USED:
+		/* Some sub-blocks in the buddy are still in use.
+		Relocation will fail. No need to try. */
+		break;
 	}
 
 func_exit:
 	/* Free the block to the buddy list. */
-	bpage = (buf_page_t*) buf;
-
-	/* Fill large blocks with a constant pattern. */
-	ut_d(memset(bpage, i, BUF_BUDDY_LOW << i));
-	UNIV_MEM_INVALID(bpage, BUF_BUDDY_LOW << i);
-	bpage->state = BUF_BLOCK_ZIP_FREE;
-	buf_buddy_add_to_free(buf_pool, bpage, i);
+	buf_buddy_add_to_free(buf_pool,
+			      reinterpret_cast<buf_buddy_free_t*>(buf),
+			      i);
 }
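The rewritten free path stamps the block free and then repeatedly merges it with its buddy for as long as the buddy is itself fully free. The sketch below mirrors that recombine loop under simplified assumptions: a std::set per size class stands in for the zip_free lists, and its membership test plays the role of the BUF_BUDDY_STAMP_FREE stamp plus the stamp.size check; the partially-used and relocation cases are left out:

#include <cstddef>
#include <set>
#include <vector>

/* Free lists keyed by size class; a std::set per class makes the
"is my buddy on the free list?" check explicit. */
typedef std::vector< std::set<std::size_t> > free_lists;

/* Free the block at 'offs' of size class i and coalesce upwards while
the buddy of the (growing) block is itself fully free, mirroring the
recombine loop in buf_buddy_free_low(). */
static void
free_and_coalesce(free_lists& zip_free, std::size_t min_size,
		  std::size_t offs, std::size_t i)
{
	while (i + 1 < zip_free.size()) {
		std::size_t buddy = offs ^ (min_size << i);

		std::set<std::size_t>::iterator it = zip_free[i].find(buddy);

		if (it == zip_free[i].end()) {
			break;		/* buddy not (fully) free */
		}

		/* The buddy is free: take it off its list and merge. */
		zip_free[i].erase(it);
		offs &= ~(min_size << i);	/* keep the lower half */
		i++;
	}

	zip_free[i].insert(offs);
}

Clearing the bit that distinguishes the pair yields the lower of the two offsets, so the merged block is naturally aligned for the next round, which is what ut_align_down() achieves in buf_buddy_free_low().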