author    Marko Mäkelä <marko.makela@mariadb.com>  2019-10-04 11:48:45 +0300
committer Marko Mäkelä <marko.makela@mariadb.com>  2019-10-04 11:48:45 +0300
commit    18dee4363b839ab9441167e5a41a7f4f1d249650 (patch)
tree      d96241eb0d67190b3f311be68b60aa62ce1d7e88
parent    5e65c67cfcc79c48c22798babee7555b04f40d18 (diff)
download  mariadb-git-bb-10.2-MDEV-19344.tar.gz
MDEV-19344 InnoDB purge buffering may corrupt a page (bb-10.2-MDEV-19344)
MySQL 5.5 introduced the ability of InnoDB to buffer delete-mark and delete operations (the insert buffer was generalized to the change buffer). These operations were only buffered on DELETE, UPDATE (of a key) and the purge of history of committed transactions. We never buffered anything on ROLLBACK; it could have been beneficial for rolling back a large recovered transaction.

The delete-mark buffering appears to work fine, but there are problems with the purge buffering. MySQL Bug #61104 "InnoDB: Failing assertion: page_get_n_recs(page) > 1" reported a problem with the purge buffering: an index page could become empty, which essentially means that the secondary index becomes corrupted. At MariaDB, we got closer to the root cause by making a failure repeatable with the test innodb.innodb-change-buffer-recovery when an additional debug assertion is present.

page_header_set_field(): Enable the assertion that PAGE_N_RECS must never be set to 0. (This function will not be invoked when initializing an empty page. If an index page is empty, it must be the root page, and the table must be empty. No changes to the root page are ever buffered.)

A combination of two asynchronous, inherently nondeterministic operations (purge and change buffering) is difficult to cover in tests or to reason about. The purge buffering also required a complex mechanism in the buffer pool, the buffer pool watch. If we no longer buffer purge operations, we can remove the watch as well.

We fix this by ceasing to buffer delete (purge) operations, that is, by treating innodb_change_buffering=all (the default) in the same way as innodb_change_buffering=changes, and treating innodb_change_buffering=purges in the same way as innodb_change_buffering=deletes. MDEV-16260 will attempt to improve the performance of purge in a more controlled fashion, by scaling the effort according to the workload.

We will retain the code that merges buffered purge operations, so that upgrades from older versions will remain possible.

BTR_DELETE_OP, BTR_DELETE, BUF_BLOCK_POOL_WATCH, BUF_GET_IF_IN_POOL_OR_WATCH, BUF_POOL_WATCH_SIZE, ROW_NOT_DELETED_REF: Remove.

btr_cur_t::purge_node, buf_pool_t::watch: Remove.

ibuf_get_volume_buffered_hash(): Remove. It is no longer necessary to estimate whether the page could become empty.
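To make the resulting semantics of the settings concrete, here is a minimal standalone sketch (not code from this patch; the helper name ibuf_would_buffer() is hypothetical) of the buffering decision that ibuf_insert() effectively implements after this change:

/* Minimal standalone sketch of the post-change buffering decision.
The enum values mirror ibuf0ibuf.h; the helper is hypothetical. */
enum ibuf_op_t { IBUF_OP_INSERT, IBUF_OP_DELETE_MARK, IBUF_OP_DELETE };
enum ibuf_use_t {
	IBUF_USE_NONE,
	IBUF_USE_INSERT,		/* innodb_change_buffering=inserts */
	IBUF_USE_DELETE_MARK,		/* =deletes */
	IBUF_USE_INSERT_DELETE_MARK,	/* =changes */
	IBUF_USE_DELETE,		/* =purges; now same as =deletes */
	IBUF_USE_ALL			/* =all; now same as =changes */
};

static bool ibuf_would_buffer(ibuf_op_t op, ibuf_use_t use)
{
	switch (op) {
	case IBUF_OP_INSERT:
		return use == IBUF_USE_INSERT
			|| use == IBUF_USE_INSERT_DELETE_MARK
			|| use == IBUF_USE_ALL;
	case IBUF_OP_DELETE_MARK:
		return use == IBUF_USE_DELETE_MARK
			|| use == IBUF_USE_DELETE
			|| use == IBUF_USE_INSERT_DELETE_MARK
			|| use == IBUF_USE_ALL;
	case IBUF_OP_DELETE:
		/* Purge is never buffered anymore; in the server code,
		this request now triggers a debug assertion instead. */
		return false;
	}
	return false;
}

Under this mapping, IBUF_USE_ALL is indistinguishable from IBUF_USE_INSERT_DELETE_MARK, and IBUF_USE_DELETE from IBUF_USE_DELETE_MARK, matching the updated comments in ha_innodb.cc and ibuf0ibuf.h below.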
-rw-r--r--  storage/innobase/btr/btr0cur.cc        |  39
-rw-r--r--  storage/innobase/buf/buf0buf.cc        | 450
-rw-r--r--  storage/innobase/buf/buf0flu.cc        |   2
-rw-r--r--  storage/innobase/buf/buf0lru.cc        |   3
-rw-r--r--  storage/innobase/handler/ha_innodb.cc  |   4
-rw-r--r--  storage/innobase/handler/i_s.cc        |   2
-rw-r--r--  storage/innobase/ibuf/ibuf0ibuf.cc     | 226
-rw-r--r--  storage/innobase/include/btr0btr.h     |  12
-rw-r--r--  storage/innobase/include/btr0cur.h     |   7
-rw-r--r--  storage/innobase/include/buf0buf.h     |  61
-rw-r--r--  storage/innobase/include/buf0buf.ic    |  27
-rw-r--r--  storage/innobase/include/ibuf0ibuf.h   |  15
-rw-r--r--  storage/innobase/include/page0page.ic  |   2
-rw-r--r--  storage/innobase/include/row0purge.h   |   8
-rw-r--r--  storage/innobase/include/row0row.h     |   6
-rw-r--r--  storage/innobase/row/row0log.cc        |   3
-rw-r--r--  storage/innobase/row/row0purge.cc      |  23
-rw-r--r--  storage/innobase/row/row0row.cc        |   4
-rw-r--r--  storage/innobase/row/row0uins.cc       |   5
-rw-r--r--  storage/innobase/row/row0umod.cc       |  10
-rw-r--r--  storage/innobase/row/row0upd.cc        |   3
21 files changed, 106 insertions(+), 806 deletions(-)
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index b23cacac227..f5580158758 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -73,7 +73,6 @@ enum btr_op_t {
BTR_NO_OP = 0, /*!< Not buffered */
BTR_INSERT_OP, /*!< Insert, do not ignore UNIQUE */
BTR_INSERT_IGNORE_UNIQUE_OP, /*!< Insert, ignoring UNIQUE */
- BTR_DELETE_OP, /*!< Purge a delete-marked record */
BTR_DELMARK_OP /*!< Mark a record for deletion */
};
@@ -860,7 +859,7 @@ btr_cur_search_to_nth_level(
PAGE_CUR_LE to search the position! */
ulint latch_mode, /*!< in: BTR_SEARCH_LEAF, ..., ORed with
at most one of BTR_INSERT, BTR_DELETE_MARK,
- BTR_DELETE, or BTR_ESTIMATE;
+ or BTR_ESTIMATE;
cursor->left_block is used to store a pointer
to the left neighbor page, in the cases
BTR_SEARCH_PREV and BTR_MODIFY_PREV;
@@ -968,7 +967,7 @@ btr_cur_search_to_nth_level(
with the latch mode for historical reasons. It's possible for
none of the flags to be set. */
switch (UNIV_EXPECT(latch_mode
- & (BTR_INSERT | BTR_DELETE | BTR_DELETE_MARK),
+ & (BTR_INSERT | BTR_DELETE_MARK),
0)) {
case 0:
btr_op = BTR_NO_OP;
@@ -978,15 +977,11 @@ btr_cur_search_to_nth_level(
? BTR_INSERT_IGNORE_UNIQUE_OP
: BTR_INSERT_OP;
break;
- case BTR_DELETE:
- btr_op = BTR_DELETE_OP;
- ut_a(cursor->purge_node);
- break;
case BTR_DELETE_MARK:
btr_op = BTR_DELMARK_OP;
break;
default:
- /* only one of BTR_INSERT, BTR_DELETE, BTR_DELETE_MARK
+ /* only one of BTR_INSERT, BTR_DELETE_MARK
should be specified at a time */
ut_error;
}
@@ -1230,9 +1225,7 @@ search_loop:
/* Try to buffer the operation if the leaf
page is not in the buffer pool. */
- buf_mode = btr_op == BTR_DELETE_OP
- ? BUF_GET_IF_IN_POOL_OR_WATCH
- : BUF_GET_IF_IN_POOL;
+ buf_mode = BUF_GET_IF_IN_POOL;
}
}
@@ -1298,30 +1291,6 @@ retry_page_get:
break;
- case BTR_DELETE_OP:
- ut_ad(buf_mode == BUF_GET_IF_IN_POOL_OR_WATCH);
- ut_ad(!dict_index_is_spatial(index));
-
- if (!row_purge_poss_sec(cursor->purge_node,
- index, tuple)) {
-
- /* The record cannot be purged yet. */
- cursor->flag = BTR_CUR_DELETE_REF;
- } else if (ibuf_insert(IBUF_OP_DELETE, tuple,
- index, page_id, page_size,
- cursor->thr)) {
-
- /* The purge was buffered. */
- cursor->flag = BTR_CUR_DELETE_IBUF;
- } else {
- /* The purge could not be buffered. */
- buf_pool_watch_unset(page_id);
- break;
- }
-
- buf_pool_watch_unset(page_id);
- goto func_exit;
-
default:
ut_error;
}
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index e3c6605652f..8c37d756765 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -1762,7 +1762,6 @@ buf_chunk_not_freed(
ibool ready;
switch (buf_block_get_state(block)) {
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
/* The uncompressed buffer pool should never
@@ -1953,13 +1952,6 @@ buf_pool_init_instance(
buf_pool->no_flush[i] = os_event_create(0);
}
- buf_pool->watch = (buf_page_t*) ut_zalloc_nokey(
- sizeof(*buf_pool->watch) * BUF_POOL_WATCH_SIZE);
- for (i = 0; i < BUF_POOL_WATCH_SIZE; i++) {
- buf_pool->watch[i].buf_pool_index
- = unsigned(buf_pool->instance_no);
- }
-
/* All fields are initialized by ut_zalloc_nokey(). */
buf_pool->try_LRU_scan = TRUE;
@@ -2035,9 +2027,6 @@ buf_pool_free_instance(
}
}
- ut_free(buf_pool->watch);
- buf_pool->watch = NULL;
-
chunks = buf_pool->chunks;
chunk = chunks + buf_pool->n_chunks;
@@ -3301,10 +3290,8 @@ buf_relocate(
ut_ad(bpage->in_page_hash);
ut_ad(bpage == buf_page_hash_get_low(buf_pool, bpage->id));
- ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
#ifdef UNIV_DEBUG
switch (buf_page_get_state(bpage)) {
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_FILE_PAGE:
@@ -3440,241 +3427,6 @@ LRUItr::start()
return(m_hp);
}
-/** Determine if a block is a sentinel for a buffer pool watch.
-@param[in] buf_pool buffer pool instance
-@param[in] bpage block
-@return TRUE if a sentinel for a buffer pool watch, FALSE if not */
-ibool
-buf_pool_watch_is_sentinel(
- const buf_pool_t* buf_pool,
- const buf_page_t* bpage)
-{
- /* We must also own the appropriate hash lock. */
- ut_ad(buf_page_hash_lock_held_s_or_x(buf_pool, bpage));
- ut_ad(buf_page_in_file(bpage));
-
- if (bpage < &buf_pool->watch[0]
- || bpage >= &buf_pool->watch[BUF_POOL_WATCH_SIZE]) {
-
- ut_ad(buf_page_get_state(bpage) != BUF_BLOCK_ZIP_PAGE
- || bpage->zip.data != NULL);
-
- return(FALSE);
- }
-
- ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
- ut_ad(!bpage->in_zip_hash);
- ut_ad(bpage->in_page_hash);
- ut_ad(bpage->zip.data == NULL);
- return(TRUE);
-}
-
-/** Add watch for the given page to be read in. Caller must have
-appropriate hash_lock for the bpage. This function may release the
-hash_lock and reacquire it.
-@param[in] page_id page id
-@param[in,out] hash_lock hash_lock currently latched
-@return NULL if watch set, block if the page is in the buffer pool */
-static
-buf_page_t*
-buf_pool_watch_set(
- const page_id_t page_id,
- rw_lock_t** hash_lock)
-{
- buf_page_t* bpage;
- ulint i;
- buf_pool_t* buf_pool = buf_pool_get(page_id);
-
- ut_ad(*hash_lock == buf_page_hash_lock_get(buf_pool, page_id));
-
- ut_ad(rw_lock_own(*hash_lock, RW_LOCK_X));
-
- bpage = buf_page_hash_get_low(buf_pool, page_id);
-
- if (bpage != NULL) {
-page_found:
- if (!buf_pool_watch_is_sentinel(buf_pool, bpage)) {
- /* The page was loaded meanwhile. */
- return(bpage);
- }
-
- /* Add to an existing watch. */
- buf_block_fix(bpage);
- return(NULL);
- }
-
- /* From this point this function becomes fairly heavy in terms
- of latching. We acquire the buf_pool mutex as well as all the
- hash_locks. buf_pool mutex is needed because any changes to
- the page_hash must be covered by it and hash_locks are needed
- because we don't want to read any stale information in
- buf_pool->watch[]. However, it is not in the critical code path
- as this function will be called only by the purge thread. */
-
- /* To obey latching order first release the hash_lock. */
- rw_lock_x_unlock(*hash_lock);
-
- buf_pool_mutex_enter(buf_pool);
- hash_lock_x_all(buf_pool->page_hash);
-
- /* If not own buf_pool_mutex, page_hash can be changed. */
- *hash_lock = buf_page_hash_lock_get(buf_pool, page_id);
-
- /* We have to recheck that the page
- was not loaded or a watch set by some other
- purge thread. This is because of the small
- time window between when we release the
- hash_lock to acquire buf_pool mutex above. */
-
- bpage = buf_page_hash_get_low(buf_pool, page_id);
- if (UNIV_LIKELY_NULL(bpage)) {
- buf_pool_mutex_exit(buf_pool);
- hash_unlock_x_all_but(buf_pool->page_hash, *hash_lock);
- goto page_found;
- }
-
- /* The maximum number of purge threads should never exceed
- BUF_POOL_WATCH_SIZE. So there is no way for purge thread
- instance to hold a watch when setting another watch. */
- for (i = 0; i < BUF_POOL_WATCH_SIZE; i++) {
- bpage = &buf_pool->watch[i];
-
- ut_ad(bpage->access_time == 0);
- ut_ad(bpage->newest_modification == 0);
- ut_ad(bpage->oldest_modification == 0);
- ut_ad(bpage->zip.data == NULL);
- ut_ad(!bpage->in_zip_hash);
-
- switch (bpage->state) {
- case BUF_BLOCK_POOL_WATCH:
- ut_ad(!bpage->in_page_hash);
- ut_ad(bpage->buf_fix_count == 0);
-
- /* bpage is pointing to buf_pool->watch[],
- which is protected by buf_pool->mutex.
- Normally, buf_page_t objects are protected by
- buf_block_t::mutex or buf_pool->zip_mutex or both. */
-
- bpage->state = BUF_BLOCK_ZIP_PAGE;
- bpage->id = page_id;
- bpage->buf_fix_count = 1;
-
- ut_d(bpage->in_page_hash = TRUE);
- HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
- page_id.fold(), bpage);
-
- buf_pool_mutex_exit(buf_pool);
- /* Once the sentinel is in the page_hash we can
- safely release all locks except just the
- relevant hash_lock */
- hash_unlock_x_all_but(buf_pool->page_hash,
- *hash_lock);
-
- return(NULL);
- case BUF_BLOCK_ZIP_PAGE:
- ut_ad(bpage->in_page_hash);
- ut_ad(bpage->buf_fix_count > 0);
- break;
- default:
- ut_error;
- }
- }
-
- /* Allocation failed. Either the maximum number of purge
- threads should never exceed BUF_POOL_WATCH_SIZE, or this code
- should be modified to return a special non-NULL value and the
- caller should purge the record directly. */
- ut_error;
-
- /* Fix compiler warning */
- return(NULL);
-}
-
-/** Remove the sentinel block for the watch before replacing it with a
-real block. buf_page_watch_clear() or buf_page_watch_occurred() will notice
-that the block has been replaced with the real block.
-@param[in,out] buf_pool buffer pool instance
-@param[in,out] watch sentinel for watch
-@return reference count, to be added to the replacement block */
-static
-void
-buf_pool_watch_remove(
- buf_pool_t* buf_pool,
- buf_page_t* watch)
-{
-#ifdef UNIV_DEBUG
- /* We must also own the appropriate hash_bucket mutex. */
- rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, watch->id);
- ut_ad(rw_lock_own(hash_lock, RW_LOCK_X));
-#endif /* UNIV_DEBUG */
-
- ut_ad(buf_pool_mutex_own(buf_pool));
-
- HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, watch->id.fold(),
- watch);
- ut_d(watch->in_page_hash = FALSE);
- watch->buf_fix_count = 0;
- watch->state = BUF_BLOCK_POOL_WATCH;
-}
-
-/** Stop watching if the page has been read in.
-buf_pool_watch_set(same_page_id) must have returned NULL before.
-@param[in] page_id page id */
-void buf_pool_watch_unset(const page_id_t page_id)
-{
- buf_page_t* bpage;
- buf_pool_t* buf_pool = buf_pool_get(page_id);
-
- /* We only need to have buf_pool mutex in case where we end
- up calling buf_pool_watch_remove but to obey latching order
- we acquire it here before acquiring hash_lock. This should
- not cause too much grief as this function is only ever
- called from the purge thread. */
- buf_pool_mutex_enter(buf_pool);
-
- rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, page_id);
- rw_lock_x_lock(hash_lock);
-
- /* The page must exist because buf_pool_watch_set()
- increments buf_fix_count. */
- bpage = buf_page_hash_get_low(buf_pool, page_id);
-
- if (buf_block_unfix(bpage) == 0
- && buf_pool_watch_is_sentinel(buf_pool, bpage)) {
- buf_pool_watch_remove(buf_pool, bpage);
- }
-
- buf_pool_mutex_exit(buf_pool);
- rw_lock_x_unlock(hash_lock);
-}
-
-/** Check if the page has been read in.
-This may only be called after buf_pool_watch_set(same_page_id)
-has returned NULL and before invoking buf_pool_watch_unset(same_page_id).
-@param[in] page_id page id
-@return false if the given page was not read in, true if it was */
-bool buf_pool_watch_occurred(const page_id_t page_id)
-{
- bool ret;
- buf_page_t* bpage;
- buf_pool_t* buf_pool = buf_pool_get(page_id);
- rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, page_id);
-
- rw_lock_s_lock(hash_lock);
-
- /* If not own buf_pool_mutex, page_hash can be changed. */
- hash_lock = buf_page_hash_lock_s_confirm(hash_lock, buf_pool, page_id);
-
- /* The page must exist because buf_pool_watch_set()
- increments buf_fix_count. */
- bpage = buf_page_hash_get_low(buf_pool, page_id);
-
- ret = !buf_pool_watch_is_sentinel(buf_pool, bpage);
- rw_lock_s_unlock(hash_lock);
-
- return(ret);
-}
-
/********************************************************************//**
Moves a page to the start of the buffer pool LRU list. This high-level
function can be used to prevent an important page from slipping out of
@@ -3735,7 +3487,6 @@ buf_page_t* buf_page_set_file_page_was_freed(const page_id_t page_id)
if (bpage) {
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
- ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
mutex_enter(block_mutex);
rw_lock_s_unlock(hash_lock);
/* bpage->file_page_was_freed can already hold
@@ -3762,7 +3513,6 @@ buf_page_t* buf_page_reset_file_page_was_freed(const page_id_t page_id)
bpage = buf_page_hash_get_s_locked(buf_pool, page_id, &hash_lock);
if (bpage) {
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
- ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
mutex_enter(block_mutex);
rw_lock_s_unlock(hash_lock);
bpage->file_page_was_freed = FALSE;
@@ -3830,7 +3580,6 @@ lookup:
bpage = buf_page_hash_get_s_locked(buf_pool, page_id,
&hash_lock);
if (bpage) {
- ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
break;
}
@@ -3860,8 +3609,6 @@ err_exit:
return(NULL);
}
- ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
-
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
@@ -4244,7 +3991,7 @@ buf_wait_for_read(
@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH
@param[in] guess guessed block or NULL
@param[in] mode BUF_GET, BUF_GET_IF_IN_POOL,
-BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH, or BUF_GET_IF_IN_POOL_OR_WATCH
+BUF_PEEK_IF_IN_POOL, or BUF_GET_NO_LATCH
@param[in] file file name
@param[in] line line where called
@param[in] mtr mini-transaction
@@ -4298,7 +4045,6 @@ buf_page_get_gen(
ut_ad(rw_latch == RW_NO_LATCH);
/* fall through */
case BUF_GET:
- case BUF_GET_IF_IN_POOL_OR_WATCH:
case BUF_GET_POSSIBLY_FREED:
bool found;
const page_size_t& space_page_size
@@ -4343,62 +4089,13 @@ loop:
block = (buf_block_t*) buf_page_hash_get_low(buf_pool, page_id);
}
- if (!block || buf_pool_watch_is_sentinel(buf_pool, &block->page)) {
+ if (!block) {
rw_lock_s_unlock(hash_lock);
- block = NULL;
- }
-
- if (block == NULL) {
/* Page not in buf_pool: needs to be read from file */
- if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
- rw_lock_x_lock(hash_lock);
-
- /* If not own buf_pool_mutex,
- page_hash can be changed. */
- hash_lock = buf_page_hash_lock_x_confirm(
- hash_lock, buf_pool, page_id);
-
- block = (buf_block_t*) buf_pool_watch_set(
- page_id, &hash_lock);
-
- if (block) {
- /* We can release hash_lock after we
- increment the fix count to make
- sure that no state change takes place. */
- fix_block = block;
-
- if (fsp_is_system_temporary(page_id.space())) {
- /* For temporary tablespace,
- the mutex is being used for
- synchronization between user
- thread and flush thread,
- instead of block->lock. See
- buf_flush_page() for the flush
- thread counterpart. */
-
- BPageMutex* fix_mutex
- = buf_page_get_mutex(
- &fix_block->page);
- mutex_enter(fix_mutex);
- buf_block_fix(fix_block);
- mutex_exit(fix_mutex);
- } else {
- buf_block_fix(fix_block);
- }
-
- /* Now safe to release page_hash mutex */
- rw_lock_x_unlock(hash_lock);
- goto got_block;
- }
-
- rw_lock_x_unlock(hash_lock);
- }
-
switch (mode) {
case BUF_GET_IF_IN_POOL:
- case BUF_GET_IF_IN_POOL_OR_WATCH:
case BUF_PEEK_IF_IN_POOL:
case BUF_EVICT_IF_IN_POOL:
ut_ad(!rw_lock_own_flagged(
@@ -4501,8 +4198,6 @@ loop:
/* Now safe to release page_hash mutex */
rw_lock_s_unlock(hash_lock);
-got_block:
-
switch (mode) {
case BUF_GET_IF_IN_POOL:
case BUF_PEEK_IF_IN_POOL:
@@ -4728,7 +4423,6 @@ evict_from_pool:
break;
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
@@ -4747,7 +4441,7 @@ evict_from_pool:
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
- if ((mode == BUF_GET_IF_IN_POOL || mode == BUF_GET_IF_IN_POOL_OR_WATCH)
+ if (mode == BUF_GET_IF_IN_POOL
&& (ibuf_debug || buf_debug_execute_is_force_flush())) {
/* Try to evict the block from the buffer pool, to use the
@@ -4777,25 +4471,15 @@ evict_from_pool:
hash_lock = buf_page_hash_lock_x_confirm(
hash_lock, buf_pool, page_id);
- if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
- /* Set the watch, as it would have
- been set if the page were not in the
- buffer pool in the first place. */
- block = (buf_block_t*) buf_pool_watch_set(
- page_id, &hash_lock);
- } else {
- block = (buf_block_t*) buf_page_hash_get_low(
- buf_pool, page_id);
- }
+ block = (buf_block_t*) buf_page_hash_get_low(
+ buf_pool, page_id);
rw_lock_x_unlock(hash_lock);
if (block != NULL) {
- /* Either the page has been read in or
- a watch was set on that in the window
- where we released the buf_pool::mutex
- and before we acquire the hash_lock
- above. Try again. */
+ /* The page was read between us
+ invoking buf_pool_mutex_exit()
+ and acquiring hash_lock above. Try again. */
guess = block;
goto loop;
@@ -5179,8 +4863,6 @@ buf_page_try_get_func(
return(NULL);
}
- ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page));
-
buf_page_mutex_enter(block);
rw_lock_s_unlock(hash_lock);
@@ -5269,8 +4951,6 @@ buf_page_init(
const page_size_t& page_size,
buf_block_t* block)
{
- buf_page_t* hash_page;
-
ut_ad(buf_pool == buf_pool_get(page_id));
ut_ad(buf_pool_mutex_own(buf_pool));
@@ -5300,35 +4980,7 @@ buf_page_init(
buf_page_init_low(&block->page);
/* Insert into the hash table of file pages */
-
- hash_page = buf_page_hash_get_low(buf_pool, page_id);
-
- if (hash_page == NULL) {
- /* Block not found in hash table */
- } else if (buf_pool_watch_is_sentinel(buf_pool, hash_page)) {
- /* Preserve the reference count. */
- ib_uint32_t buf_fix_count = hash_page->buf_fix_count;
-
- ut_a(buf_fix_count > 0);
-
- my_atomic_add32((int32*) &block->page.buf_fix_count, buf_fix_count);
-
- buf_pool_watch_remove(buf_pool, hash_page);
- } else {
-
- ib::error() << "Page " << page_id
- << " already found in the hash table: "
- << hash_page << ", " << block;
-
- ut_d(buf_page_mutex_exit(block));
- ut_d(buf_pool_mutex_exit(buf_pool));
- ut_d(buf_print());
- ut_d(buf_LRU_print());
- ut_d(buf_validate());
- ut_d(buf_LRU_validate());
- ut_error;
- }
-
+ DBUG_ASSERT(!buf_page_hash_get_low(buf_pool, page_id));
ut_ad(!block->page.in_zip_hash);
ut_ad(!block->page.in_page_hash);
ut_d(block->page.in_page_hash = TRUE);
@@ -5369,7 +5021,6 @@ buf_page_init_for_read(
{
buf_block_t* block;
buf_page_t* bpage = NULL;
- buf_page_t* watch_page;
rw_lock_t* hash_lock;
mtr_t mtr;
ibool lru = FALSE;
@@ -5411,10 +5062,8 @@ buf_page_init_for_read(
hash_lock = buf_page_hash_lock_get(buf_pool, page_id);
rw_lock_x_lock(hash_lock);
- watch_page = buf_page_hash_get_low(buf_pool, page_id);
- if (watch_page && !buf_pool_watch_is_sentinel(buf_pool, watch_page)) {
+ if (buf_page_hash_get_low(buf_pool, page_id)) {
/* The page is already in the buffer pool. */
- watch_page = NULL;
rw_lock_x_unlock(hash_lock);
if (block) {
buf_page_mutex_enter(block);
@@ -5496,23 +5145,14 @@ buf_page_init_for_read(
/* If buf_buddy_alloc() allocated storage from the LRU list,
it released and reacquired buf_pool->mutex. Thus, we must
check the page_hash again, as it may have been modified. */
- if (UNIV_UNLIKELY(lru)) {
-
- watch_page = buf_page_hash_get_low(buf_pool, page_id);
-
- if (UNIV_UNLIKELY(watch_page
- && !buf_pool_watch_is_sentinel(buf_pool,
- watch_page))) {
-
- /* The block was added by some other thread. */
- rw_lock_x_unlock(hash_lock);
- watch_page = NULL;
- buf_buddy_free(buf_pool, data,
- page_size.physical());
+ if (UNIV_UNLIKELY(lru)
+ && buf_page_hash_get_low(buf_pool, page_id)) {
+ /* The block was added by some other thread. */
+ rw_lock_x_unlock(hash_lock);
+ buf_buddy_free(buf_pool, data, page_size.physical());
- bpage = NULL;
- goto func_exit;
- }
+ bpage = NULL;
+ goto func_exit;
}
bpage = buf_page_alloc_descriptor();
@@ -5543,21 +5183,6 @@ buf_page_init_for_read(
ut_d(bpage->in_page_hash = TRUE);
- if (watch_page != NULL) {
-
- /* Preserve the reference count. */
- ib_uint32_t buf_fix_count;
-
- buf_fix_count = watch_page->buf_fix_count;
-
- ut_a(buf_fix_count > 0);
-
- my_atomic_add32((int32*) &bpage->buf_fix_count, buf_fix_count);
-
- ut_ad(buf_pool_watch_is_sentinel(buf_pool, watch_page));
- buf_pool_watch_remove(buf_pool, watch_page);
- }
-
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
bpage->id.fold(), bpage);
@@ -5624,8 +5249,7 @@ buf_page_create(
block = (buf_block_t*) buf_page_hash_get_low(buf_pool, page_id);
if (block
- && buf_page_in_file(&block->page)
- && !buf_pool_watch_is_sentinel(buf_pool, &block->page)) {
+ && buf_page_in_file(&block->page)) {
ut_d(block->page.file_page_was_freed = FALSE);
/* Page can be found in buf_pool */
@@ -6422,7 +6046,6 @@ buf_pool_validate_instance(
buf_page_mutex_enter(block);
switch (buf_block_get_state(block)) {
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
/* These should only occur on
@@ -6533,8 +6156,7 @@ assert_s_latched:
ut_a(b->oldest_modification);
n_flush++;
- switch (buf_page_get_state(b)) {
- case BUF_BLOCK_ZIP_DIRTY:
+ if (buf_page_get_state(b) == BUF_BLOCK_ZIP_DIRTY) {
n_lru++;
n_zip++;
switch (buf_page_get_io_fix(b)) {
@@ -6558,18 +6180,8 @@ assert_s_latched:
}
break;
}
- break;
- case BUF_BLOCK_FILE_PAGE:
- /* uncompressed page */
- break;
- case BUF_BLOCK_POOL_WATCH:
- case BUF_BLOCK_ZIP_PAGE:
- case BUF_BLOCK_NOT_USED:
- case BUF_BLOCK_READY_FOR_USE:
- case BUF_BLOCK_MEMORY:
- case BUF_BLOCK_REMOVE_HASH:
- ut_error;
- break;
+ } else {
+ ut_ad(buf_page_get_state(b) == BUF_BLOCK_FILE_PAGE);
}
ut_a(buf_page_hash_get_low(buf_pool, b->id) == b);
}
@@ -6808,25 +6420,15 @@ buf_get_latched_pages_number_instance(
for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_ad(b->in_flush_list);
-
- switch (buf_page_get_state(b)) {
- case BUF_BLOCK_ZIP_DIRTY:
+ ut_ad(buf_page_get_state(b) == BUF_BLOCK_ZIP_DIRTY
+ || buf_page_get_state(b) == BUF_BLOCK_FILE_PAGE);
+ if (buf_page_get_state(b) == BUF_BLOCK_ZIP_DIRTY) {
if (b->buf_fix_count != 0
|| buf_page_get_io_fix(b) != BUF_IO_NONE) {
fixed_pages_number++;
}
- break;
- case BUF_BLOCK_FILE_PAGE:
- /* uncompressed page */
- break;
- case BUF_BLOCK_POOL_WATCH:
- case BUF_BLOCK_ZIP_PAGE:
- case BUF_BLOCK_NOT_USED:
- case BUF_BLOCK_READY_FOR_USE:
- case BUF_BLOCK_MEMORY:
- case BUF_BLOCK_REMOVE_HASH:
- ut_error;
- break;
+ } else {
+ ut_ad(buf_page_get_state(b) == BUF_BLOCK_FILE_PAGE);
}
}
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 74df5ee2de8..83201fdac80 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -663,7 +663,6 @@ buf_flush_remove(
buf_pool->flush_hp.adjust(bpage);
switch (buf_page_get_state(bpage)) {
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_ZIP_PAGE:
/* Clean compressed pages should not be on the flush list */
case BUF_BLOCK_NOT_USED:
@@ -1049,7 +1048,6 @@ buf_flush_write_block_low(
}
switch (buf_page_get_state(bpage)) {
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_ZIP_PAGE: /* The page should be dirty. */
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index 8673c8d9d72..7a0a47d3aa1 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -1990,7 +1990,6 @@ buf_LRU_block_remove_hashed(
bpage->size.physical());
}
break;
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_ZIP_DIRTY:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
@@ -2128,7 +2127,6 @@ buf_LRU_block_remove_hashed(
return(true);
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_ZIP_DIRTY:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
@@ -2362,7 +2360,6 @@ buf_LRU_validate_instance(
bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
switch (buf_page_get_state(bpage)) {
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 5ff36aa9c24..628eeac4244 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -460,8 +460,8 @@ static const char* innobase_change_buffering_values[IBUF_USE_COUNT] = {
"inserts", /* IBUF_USE_INSERT */
"deletes", /* IBUF_USE_DELETE_MARK */
"changes", /* IBUF_USE_INSERT_DELETE_MARK */
- "purges", /* IBUF_USE_DELETE */
- "all" /* IBUF_USE_ALL */
+ "purges", /* IBUF_USE_DELETE (same as IBUF_USE_DELETE_MARK) */
+ "all" /* IBUF_USE_ALL (same as IBUF_USE_INSERT_DELETE_MARK) */
};
/** Retrieve the FTS Relevance Ranking result for doc with doc_id
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index fae634630fd..c8e556ee2dc 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -4930,7 +4930,6 @@ i_s_innodb_buffer_page_fill(
/* First three states are for compression pages and
are not states we would get as we scan pages through
buffer blocks */
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
state_str = NULL;
@@ -5657,7 +5656,6 @@ i_s_innodb_buf_page_lru_fill(
state_str = "NO";
break;
/* We should not see following states */
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_MEMORY:
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index 5581d1d1093..019302b54e9 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -2727,69 +2727,23 @@ ibuf_contract_after_insert(
} while (size > 0 && sum_sizes < entry_size);
}
-/*********************************************************************//**
-Determine if an insert buffer record has been encountered already.
-@return TRUE if a new record, FALSE if possible duplicate */
-static
-ibool
-ibuf_get_volume_buffered_hash(
-/*==========================*/
- const rec_t* rec, /*!< in: ibuf record in post-4.1 format */
- const byte* types, /*!< in: fields */
- const byte* data, /*!< in: start of user record data */
- ulint comp, /*!< in: 0=ROW_FORMAT=REDUNDANT,
- nonzero=ROW_FORMAT=COMPACT */
- ulint* hash, /*!< in/out: hash array */
- ulint size) /*!< in: number of elements in hash array */
-{
- ulint len;
- ulint fold;
- ulint bitmask;
-
- len = ibuf_rec_get_size(
- rec, types,
- rec_get_n_fields_old(rec) - IBUF_REC_FIELD_USER, comp);
- fold = ut_fold_binary(data, len);
-
- hash += (fold / (CHAR_BIT * sizeof *hash)) % size;
- bitmask = static_cast<ulint>(1) << (fold % (CHAR_BIT * sizeof(*hash)));
-
- if (*hash & bitmask) {
-
- return(FALSE);
- }
-
- /* We have not seen this record yet. Insert it. */
- *hash |= bitmask;
-
- return(TRUE);
-}
-
#ifdef UNIV_DEBUG
-# define ibuf_get_volume_buffered_count(mtr,rec,hash,size,n_recs) \
- ibuf_get_volume_buffered_count_func(mtr,rec,hash,size,n_recs)
+# define ibuf_get_volume_buffered_count(mtr, rec) \
+ ibuf_get_volume_buffered_count_func(mtr, rec)
#else /* UNIV_DEBUG */
-# define ibuf_get_volume_buffered_count(mtr,rec,hash,size,n_recs) \
- ibuf_get_volume_buffered_count_func(rec,hash,size,n_recs)
+# define ibuf_get_volume_buffered_count(mtr, rec) \
+ ibuf_get_volume_buffered_count_func(rec)
#endif /* UNIV_DEBUG */
-/*********************************************************************//**
-Update the estimate of the number of records on a page, and
-get the space taken by merging the buffered record to the index page.
+/** Determine the space taken by merging the buffered record to the index page.
+@param rec change buffer record
@return size of index record in bytes + an upper limit of the space
taken in the page directory */
-static
-ulint
-ibuf_get_volume_buffered_count_func(
-/*================================*/
+static ulint ibuf_get_volume_buffered_count_func(
#ifdef UNIV_DEBUG
- mtr_t* mtr, /*!< in: mini-transaction owning rec */
-#endif /* UNIV_DEBUG */
- const rec_t* rec, /*!< in: insert buffer record */
- ulint* hash, /*!< in/out: hash array */
- ulint size, /*!< in: number of elements in hash array */
- lint* n_recs) /*!< in/out: estimated number of records
- on the page that rec points to */
+ mtr_t* mtr, /*!< mini-transaction */
+#endif
+ const rec_t* rec)
{
ulint len;
ibuf_op_t ibuf_op;
@@ -2825,21 +2779,12 @@ ibuf_get_volume_buffered_count_func(
default:
ut_error;
case 0:
- /* This ROW_TYPE=REDUNDANT record does not include an
- operation counter. Exclude it from the *n_recs,
- because deletes cannot be buffered if there are
- old-style inserts buffered for the page. */
-
len = ibuf_rec_get_size(rec, types, n_fields, 0);
return(len
+ rec_get_converted_extra_size(len, n_fields, 0)
+ page_dir_calc_reserved_space(1));
case 1:
- /* This ROW_TYPE=COMPACT record does not include an
- operation counter. Exclude it from the *n_recs,
- because deletes cannot be buffered if there are
- old-style inserts buffered for the page. */
goto get_volume_comp;
case IBUF_REC_INFO_SIZE:
@@ -2849,35 +2794,14 @@ ibuf_get_volume_buffered_count_func(
switch (ibuf_op) {
case IBUF_OP_INSERT:
- /* Inserts can be done by updating a delete-marked record.
- Because delete-mark and insert operations can be pointing to
- the same records, we must not count duplicates. */
- case IBUF_OP_DELETE_MARK:
- /* There must be a record to delete-mark.
- See if this record has been already buffered. */
- if (n_recs && ibuf_get_volume_buffered_hash(
- rec, types + IBUF_REC_INFO_SIZE,
- types + len,
- types[IBUF_REC_OFFSET_FLAGS] & IBUF_REC_COMPACT,
- hash, size)) {
- (*n_recs)++;
- }
-
- if (ibuf_op == IBUF_OP_DELETE_MARK) {
- /* Setting the delete-mark flag does not
- affect the available space on the page. */
- return(0);
- }
break;
+ case IBUF_OP_DELETE_MARK:
+ return 0;
case IBUF_OP_DELETE:
- /* A record will be removed from the page. */
- if (n_recs) {
- (*n_recs)--;
- }
/* While deleting a record actually frees up space,
we have to play it safe and pretend that it takes no
additional space (the record might not exist, etc.). */
- return(0);
+ return 0;
default:
ut_error;
}
@@ -2920,9 +2844,6 @@ ibuf_get_volume_buffered(
or BTR_MODIFY_TREE */
ulint space, /*!< in: space id */
ulint page_no,/*!< in: page number of an index page */
- lint* n_recs, /*!< in/out: minimum number of records on the
- page after the buffered changes have been
- applied, or NULL to disable the counting */
mtr_t* mtr) /*!< in: mini-transaction of pcur */
{
ulint volume;
@@ -2932,8 +2853,6 @@ ibuf_get_volume_buffered(
const page_t* prev_page;
ulint next_page_no;
const page_t* next_page;
- /* bitmap of buffered recs */
- ulint hash_bitmap[128 / sizeof(ulint)];
ut_ad((pcur->latch_mode == BTR_MODIFY_PREV)
|| (pcur->latch_mode == BTR_MODIFY_TREE));
@@ -2943,10 +2862,6 @@ ibuf_get_volume_buffered(
volume = 0;
- if (n_recs) {
- memset(hash_bitmap, 0, sizeof hash_bitmap);
- }
-
rec = btr_pcur_get_rec(pcur);
page = page_align(rec);
ut_ad(page_validate(page, ibuf->index));
@@ -2965,9 +2880,7 @@ ibuf_get_volume_buffered(
goto count_later;
}
- volume += ibuf_get_volume_buffered_count(
- mtr, rec,
- hash_bitmap, UT_ARR_SIZE(hash_bitmap), n_recs);
+ volume += ibuf_get_volume_buffered_count(mtr, rec);
}
/* Look at the previous page */
@@ -3017,9 +2930,7 @@ ibuf_get_volume_buffered(
goto count_later;
}
- volume += ibuf_get_volume_buffered_count(
- mtr, rec,
- hash_bitmap, UT_ARR_SIZE(hash_bitmap), n_recs);
+ volume += ibuf_get_volume_buffered_count(mtr, rec);
}
count_later:
@@ -3037,9 +2948,7 @@ count_later:
return(volume);
}
- volume += ibuf_get_volume_buffered_count(
- mtr, rec,
- hash_bitmap, UT_ARR_SIZE(hash_bitmap), n_recs);
+ volume += ibuf_get_volume_buffered_count(mtr, rec);
}
/* Look at the next page */
@@ -3087,9 +2996,7 @@ count_later:
return(volume);
}
- volume += ibuf_get_volume_buffered_count(
- mtr, rec,
- hash_bitmap, UT_ARR_SIZE(hash_bitmap), n_recs);
+ volume += ibuf_get_volume_buffered_count(mtr, rec);
}
}
@@ -3305,7 +3212,6 @@ ibuf_insert_low(
mem_heap_t* heap;
ulint* offsets = NULL;
ulint buffered;
- lint min_n_recs;
rec_t* ins_rec;
ibool old_bit_value;
page_t* bitmap_page;
@@ -3323,7 +3229,7 @@ ibuf_insert_low(
ut_ad(!dict_index_is_spatial(index));
ut_ad(dtuple_check_typed(entry));
ut_ad(!no_counter || op == IBUF_OP_INSERT);
- ut_a(op < IBUF_OP_COUNT);
+ ut_ad(op == IBUF_OP_INSERT || op == IBUF_OP_DELETE_MARK);
do_merge = FALSE;
@@ -3395,42 +3301,9 @@ ibuf_insert_low(
/* Find out the volume of already buffered inserts for the same index
page */
- min_n_recs = 0;
buffered = ibuf_get_volume_buffered(&pcur,
page_id.space(),
- page_id.page_no(),
- op == IBUF_OP_DELETE
- ? &min_n_recs
- : NULL, &mtr);
-
- if (op == IBUF_OP_DELETE
- && (min_n_recs < 2 || buf_pool_watch_occurred(page_id))) {
- /* The page could become empty after the record is
- deleted, or the page has been read in to the buffer
- pool. Refuse to buffer the operation. */
-
- /* The buffer pool watch is needed for IBUF_OP_DELETE
- because of latching order considerations. We can
- check buf_pool_watch_occurred() only after latching
- the insert buffer B-tree pages that contain buffered
- changes for the page. We never buffer IBUF_OP_DELETE,
- unless some IBUF_OP_INSERT or IBUF_OP_DELETE_MARK have
- been previously buffered for the page. Because there
- are buffered operations for the page, the insert
- buffer B-tree page latches held by mtr will guarantee
- that no changes for the user page will be merged
- before mtr_commit(&mtr). We must not mtr_commit(&mtr)
- until after the IBUF_OP_DELETE has been buffered. */
-
-fail_exit:
- if (BTR_LATCH_MODE_WITHOUT_INTENTION(mode) == BTR_MODIFY_TREE) {
- mutex_exit(&ibuf_mutex);
- mutex_exit(&ibuf_pessimistic_insert_mutex);
- }
-
- err = DB_STRONG_FAIL;
- goto func_exit;
- }
+ page_id.page_no(), &mtr);
/* After this point, the page could still be loaded to the
buffer pool, but we do not have to care about it, since we are
@@ -3454,7 +3327,14 @@ fail_exit:
page_id.page_no())) {
ibuf_mtr_commit(&bitmap_mtr);
- goto fail_exit;
+fail_exit:
+ if (BTR_LATCH_MODE_WITHOUT_INTENTION(mode) == BTR_MODIFY_TREE) {
+ mutex_exit(&ibuf_mutex);
+ mutex_exit(&ibuf_pessimistic_insert_mutex);
+ }
+
+ err = DB_STRONG_FAIL;
+ goto func_exit;
}
if (op == IBUF_OP_INSERT) {
@@ -3652,7 +3532,7 @@ ibuf_insert(
case IBUF_USE_INSERT:
case IBUF_USE_INSERT_DELETE_MARK:
case IBUF_USE_ALL:
- goto check_watch;
+ goto mode_ok;
case IBUF_USE_COUNT:
break;
}
@@ -3667,63 +3547,21 @@ ibuf_insert(
case IBUF_USE_INSERT_DELETE_MARK:
case IBUF_USE_ALL:
ut_ad(!no_counter);
- goto check_watch;
+ goto mode_ok;
case IBUF_USE_COUNT:
break;
}
break;
case IBUF_OP_DELETE:
- switch (use) {
- case IBUF_USE_NONE:
- case IBUF_USE_INSERT:
- case IBUF_USE_INSERT_DELETE_MARK:
- DBUG_RETURN(FALSE);
- case IBUF_USE_DELETE_MARK:
- case IBUF_USE_DELETE:
- case IBUF_USE_ALL:
- ut_ad(!no_counter);
- goto skip_watch;
- case IBUF_USE_COUNT:
- break;
- }
- break;
case IBUF_OP_COUNT:
break;
}
/* unknown op or use */
- ut_error;
-
-check_watch:
- /* If a thread attempts to buffer an insert on a page while a
- purge is in progress on the same page, the purge must not be
- buffered, because it could remove a record that was
- re-inserted later. For simplicity, we block the buffering of
- all operations on a page that has a purge pending.
-
- We do not check this in the IBUF_OP_DELETE case, because that
- would always trigger the buffer pool watch during purge and
- thus prevent the buffering of delete operations. We assume
- that the issuer of IBUF_OP_DELETE has called
- buf_pool_watch_set(space, page_no). */
-
- {
- buf_pool_t* buf_pool = buf_pool_get(page_id);
- buf_page_t* bpage
- = buf_page_get_also_watch(buf_pool, page_id);
-
- if (bpage != NULL) {
- /* A buffer pool watch has been set or the
- page has been read into the buffer pool.
- Do not buffer the request. If a purge operation
- is being buffered, have this request executed
- directly on the page in the buffer pool after the
- buffered entries for this page have been merged. */
- DBUG_RETURN(FALSE);
- }
- }
+ ut_ad(!"invalid mode");
+ DBUG_RETURN(false);
-skip_watch:
+mode_ok:
entry_size = rec_get_converted_size(index, entry, 0);
if (entry_size
diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h
index e00b2545708..488e1b8a8f6 100644
--- a/storage/innobase/include/btr0btr.h
+++ b/storage/innobase/include/btr0btr.h
@@ -70,7 +70,7 @@ enum btr_latch_mode {
/** Continue searching the entire B-tree. */
BTR_CONT_SEARCH_TREE = 38,
- /* BTR_INSERT, BTR_DELETE and BTR_DELETE_MARK are mutually
+ /* BTR_INSERT and BTR_DELETE_MARK are mutually
exclusive. */
/** The search tuple will be inserted to the secondary index
at the searched position. When the leaf page is not in the
@@ -82,10 +82,6 @@ enum btr_latch_mode {
not in the buffer pool. */
BTR_DELETE_MARK = 4096,
- /** Try to purge the record using the change buffer when the
- secondary index leaf page is not in the buffer pool. */
- BTR_DELETE = 8192,
-
/** The caller is already holding dict_index_t::lock S-latch. */
BTR_ALREADY_S_LATCHED = 16384,
/** Search and S-latch a leaf page, assuming that the
@@ -108,11 +104,10 @@ enum btr_latch_mode {
BTR_DELETE_MARK_LEAF_ALREADY_S_LATCHED = BTR_DELETE_MARK_LEAF
| BTR_ALREADY_S_LATCHED,
/** Attempt to purge a secondary index record. */
- BTR_PURGE_LEAF = BTR_MODIFY_LEAF | BTR_DELETE,
+ BTR_PURGE_LEAF = BTR_MODIFY_LEAF,
/** Attempt to purge a secondary index record
while holding the dict_index_t::lock S-latch. */
- BTR_PURGE_LEAF_ALREADY_S_LATCHED = BTR_PURGE_LEAF
- | BTR_ALREADY_S_LATCHED,
+ BTR_PURGE_LEAF_ALREADY_S_LATCHED = BTR_MODIFY_LEAF_ALREADY_S_LATCHED,
/** In the case of BTR_MODIFY_TREE, the caller specifies
the intention to delete record only. It is used to optimize
@@ -153,7 +148,6 @@ record is in spatial index */
| BTR_DELETE_MARK \
| BTR_RTREE_UNDO_INS \
| BTR_RTREE_DELETE_MARK \
- | BTR_DELETE \
| BTR_ESTIMATE \
| BTR_IGNORE_SEC_UNIQUE \
| BTR_ALREADY_S_LATCHED \
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index 4d67833db70..9475ee0a84b 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -169,7 +169,7 @@ btr_cur_search_to_nth_level(
search the position! */
ulint latch_mode, /*!< in: BTR_SEARCH_LEAF, ..., ORed with
at most one of BTR_INSERT, BTR_DELETE_MARK,
- BTR_DELETE, or BTR_ESTIMATE;
+ or BTR_ESTIMATE;
cursor->left_block is used to store a pointer
to the left neighbor page, in the cases
BTR_SEARCH_PREV and BTR_MODIFY_PREV;
@@ -855,9 +855,8 @@ enum btr_cur_method {
the insert buffer */
BTR_CUR_DEL_MARK_IBUF, /*!< performed the intended delete
mark in the insert/delete buffer */
- BTR_CUR_DELETE_IBUF, /*!< performed the intended delete in
+ BTR_CUR_DELETE_IBUF /*!< performed the intended delete in
the insert/delete buffer */
- BTR_CUR_DELETE_REF /*!< row_purge_poss_sec() failed */
};
/** The tree cursor: the definition appears here only for the compiler
@@ -865,7 +864,6 @@ to know struct size! */
struct btr_cur_t {
dict_index_t* index; /*!< index where positioned */
page_cur_t page_cur; /*!< page cursor */
- purge_node_t* purge_node; /*!< purge node, for BTR_DELETE */
buf_block_t* left_block; /*!< this field is used to store
a pointer to the left neighbor
page, in the cases
@@ -935,7 +933,6 @@ struct btr_cur_t {
{
index = NULL;
memset(&page_cur, 0, sizeof page_cur);
- purge_node = NULL;
left_block = NULL;
thr = NULL;
flag = btr_cur_method(0);
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index 6bfeeb83018..318ae189aab 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -59,10 +59,6 @@ struct fil_addr_t;
it is error-prone programming
not to set a latch, and it
should be used with care */
-#define BUF_GET_IF_IN_POOL_OR_WATCH 15
- /*!< Get the page only if it's in the
- buffer pool, if not then set a watch
- on the page. */
#define BUF_GET_POSSIBLY_FREED 16
/*!< Like BUF_GET, but do not mind
if the file page has been freed. */
@@ -86,9 +82,6 @@ struct fil_addr_t;
/*!< The maximum number of buffer
pools that can be defined */
-#define BUF_POOL_WATCH_SIZE (srv_n_purge_threads + 1)
- /*!< Maximum number of concurrent
- buffer pool watches */
#define MAX_PAGE_HASH_LOCKS 1024 /*!< The maximum number of
page_hash locks */
@@ -112,8 +105,6 @@ extern my_bool buf_disable_resize_buffer_pool_debug; /*!< if TRUE, resizing
The enumeration values must be 0..7. */
enum buf_page_state {
- BUF_BLOCK_POOL_WATCH, /*!< a sentinel for the buffer pool
- watch, element of buf_pool->watch[] */
BUF_BLOCK_ZIP_PAGE, /*!< contains a clean
compressed page */
BUF_BLOCK_ZIP_DIRTY, /*!< contains a compressed
@@ -439,7 +430,7 @@ buf_page_get_zip(
@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH
@param[in] guess guessed block or NULL
@param[in] mode BUF_GET, BUF_GET_IF_IN_POOL,
-BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH, or BUF_GET_IF_IN_POOL_OR_WATCH
+BUF_PEEK_IF_IN_POOL, or BUF_GET_NO_LATCH
@param[in] file file name
@param[in] line line where called
@param[in] mtr mini-transaction
@@ -1220,17 +1211,14 @@ found, NULL otherwise. If NULL is passed then the hash_lock is released by
this function.
@param[in] lock_mode RW_LOCK_X or RW_LOCK_S. Ignored if
lock == NULL
-@param[in] watch if true, return watch sentinel also.
-@return pointer to the bpage or NULL; if NULL, lock is also NULL or
-a watch sentinel. */
+@return pointer to the bpage or NULL; if NULL, lock is also NULL. */
UNIV_INLINE
buf_page_t*
buf_page_hash_get_locked(
buf_pool_t* buf_pool,
const page_id_t page_id,
rw_lock_t** lock,
- ulint lock_mode,
- bool watch = false);
+ ulint lock_mode);
/** Returns the control block of a file page, NULL if not found.
If the block is found and lock is not NULL then the appropriate
@@ -1267,9 +1255,6 @@ buf_page_hash_get_low() function.
buf_page_hash_get_locked(b, page_id, l, RW_LOCK_X)
#define buf_page_hash_get(b, page_id) \
buf_page_hash_get_locked(b, page_id, NULL, 0)
-#define buf_page_get_also_watch(b, page_id) \
- buf_page_hash_get_locked(b, page_id, NULL, 0, true)
-
#define buf_block_hash_get_s_locked(b, page_id, l) \
buf_block_hash_get_locked(b, page_id, l, RW_LOCK_S)
#define buf_block_hash_get_x_locked(b, page_id, l) \
@@ -1278,29 +1263,6 @@ buf_page_hash_get_low() function.
buf_block_hash_get_locked(b, page_id, NULL, 0)
/********************************************************************//**
-Determine if a block is a sentinel for a buffer pool watch.
-@return TRUE if a sentinel for a buffer pool watch, FALSE if not */
-ibool
-buf_pool_watch_is_sentinel(
-/*=======================*/
- const buf_pool_t* buf_pool, /*!< buffer pool instance */
- const buf_page_t* bpage) /*!< in: block */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
-
-/** Stop watching if the page has been read in.
-buf_pool_watch_set(space,offset) must have returned NULL before.
-@param[in] page_id page id */
-void buf_pool_watch_unset(const page_id_t page_id);
-
-/** Check if the page has been read in.
-This may only be called after buf_pool_watch_set(space,offset)
-has returned NULL and before invoking buf_pool_watch_unset(space,offset).
-@param[in] page_id page id
-@return FALSE if the given page was not read in, TRUE if it was */
-bool buf_pool_watch_occurred(const page_id_t page_id)
-MY_ATTRIBUTE((warn_unused_result));
-
-/********************************************************************//**
Get total buffer pool statistics. */
void
buf_get_total_list_len(
@@ -1458,10 +1420,7 @@ public:
/* @} */
page_zip_des_t zip; /*!< compressed page; zip.data
(but not the data it points to) is
- also protected by buf_pool->mutex;
- state == BUF_BLOCK_ZIP_PAGE and
- zip.data == NULL means an active
- buf_pool->watch */
+ also protected by buf_pool->mutex */
ulint write_size; /* Write size is set when this
page is first time written and then
@@ -2168,19 +2127,13 @@ struct buf_pool_t{
UT_LIST_BASE_NODE_T(buf_buddy_free_t) zip_free[BUF_BUDDY_SIZES_MAX];
/*!< buddy free lists */
- buf_page_t* watch;
- /*!< Sentinel records for buffer
- pool watches. Protected by
- buf_pool->mutex. */
-
- buf_tmp_array_t* tmp_arr;
- /*!< Array for temporal memory
- used in compression and encryption */
-
#if BUF_BUDDY_LOW > UNIV_ZIP_SIZE_MIN
# error "BUF_BUDDY_LOW > UNIV_ZIP_SIZE_MIN"
#endif
/* @} */
+
+ /** buffers for page_compressed and encrypted page I/O */
+ buf_tmp_array_t* tmp_arr;
};
/** Print the given buf_pool_t object.
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index 98026159b8a..95f083a6b7a 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -217,7 +217,6 @@ buf_page_get_state(
#ifdef UNIV_DEBUG
switch (state) {
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
case BUF_BLOCK_NOT_USED:
@@ -258,8 +257,6 @@ buf_get_state_name(
enum buf_page_state state = buf_page_get_state(&block->page);
switch (state) {
- case BUF_BLOCK_POOL_WATCH:
- return (const char *) "BUF_BLOCK_POOL_WATCH";
case BUF_BLOCK_ZIP_PAGE:
return (const char *) "BUF_BLOCK_ZIP_PAGE";
case BUF_BLOCK_ZIP_DIRTY:
@@ -292,9 +289,6 @@ buf_page_set_state(
enum buf_page_state old_state = buf_page_get_state(bpage);
switch (old_state) {
- case BUF_BLOCK_POOL_WATCH:
- ut_error;
- break;
case BUF_BLOCK_ZIP_PAGE:
ut_a(state == BUF_BLOCK_ZIP_DIRTY);
break;
@@ -362,9 +356,6 @@ buf_page_in_file(
const buf_page_t* bpage) /*!< in: pointer to control block */
{
switch (buf_page_get_state(bpage)) {
- case BUF_BLOCK_POOL_WATCH:
- ut_error;
- break;
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
case BUF_BLOCK_FILE_PAGE:
@@ -406,9 +397,6 @@ buf_page_get_mutex(
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
switch (buf_page_get_state(bpage)) {
- case BUF_BLOCK_POOL_WATCH:
- ut_error;
- return(NULL);
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
return(&buf_pool->zip_mutex);
@@ -726,11 +714,9 @@ buf_block_get_frame(
}
switch (buf_block_get_state(block)) {
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
case BUF_BLOCK_NOT_USED:
- ut_error;
break;
case BUF_BLOCK_FILE_PAGE:
ut_a(block->page.buf_fix_count > 0);
@@ -1138,17 +1124,14 @@ found, NULL otherwise. If NULL is passed then the hash_lock is released by
this function.
@param[in] lock_mode RW_LOCK_X or RW_LOCK_S. Ignored if
lock == NULL
-@param[in] watch if true, return watch sentinel also.
-@return pointer to the bpage or NULL; if NULL, lock is also NULL or
-a watch sentinel. */
+@return pointer to the bpage or NULL; if NULL, lock is also NULL */
UNIV_INLINE
buf_page_t*
buf_page_hash_get_locked(
buf_pool_t* buf_pool,
const page_id_t page_id,
rw_lock_t** lock,
- ulint lock_mode,
- bool watch)
+ ulint lock_mode)
{
buf_page_t* bpage = NULL;
rw_lock_t* hash_lock;
@@ -1181,10 +1164,7 @@ buf_page_hash_get_locked(
bpage = buf_page_hash_get_low(buf_pool, page_id);
- if (!bpage || buf_pool_watch_is_sentinel(buf_pool, bpage)) {
- if (!watch) {
- bpage = NULL;
- }
+ if (!bpage) {
goto unlock_and_exit;
}
@@ -1306,7 +1286,6 @@ buf_page_release_zip(
buf_block_unfix(reinterpret_cast<buf_block_t*>(bpage));
return;
- case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
diff --git a/storage/innobase/include/ibuf0ibuf.h b/storage/innobase/include/ibuf0ibuf.h
index 09dc61496f3..3cb1957851c 100644
--- a/storage/innobase/include/ibuf0ibuf.h
+++ b/storage/innobase/include/ibuf0ibuf.h
@@ -38,28 +38,31 @@ of percentage of the buffer pool. */
/* Possible operations buffered in the insert/whatever buffer. See
ibuf_insert(). DO NOT CHANGE THE VALUES OF THESE, THEY ARE STORED ON DISK. */
-typedef enum {
+enum ibuf_op_t {
IBUF_OP_INSERT = 0,
IBUF_OP_DELETE_MARK = 1,
+ /** This one could exist in the change buffer after an upgrade */
IBUF_OP_DELETE = 2,
/* Number of different operation types. */
IBUF_OP_COUNT = 3
-} ibuf_op_t;
+};
/** Combinations of operations that can be buffered. Because the enum
values are used for indexing innobase_change_buffering_values[], they
should start at 0 and there should not be any gaps. */
-typedef enum {
+enum ibuf_use_t {
IBUF_USE_NONE = 0,
IBUF_USE_INSERT, /* insert */
IBUF_USE_DELETE_MARK, /* delete */
IBUF_USE_INSERT_DELETE_MARK, /* insert+delete */
- IBUF_USE_DELETE, /* delete+purge */
- IBUF_USE_ALL, /* insert+delete+purge */
+ /** same as IBUF_USE_DELETE_MARK */
+ IBUF_USE_DELETE,
+ /** same as IBUF_USE_INSERT_DELETE_MARK */
+ IBUF_USE_ALL,
IBUF_USE_COUNT /* number of entries in ibuf_use_t */
-} ibuf_use_t;
+};
/** Operations that can currently be buffered. */
extern ibuf_use_t ibuf_use;
diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic
index 05774daac50..c6cd9d916d9 100644
--- a/storage/innobase/include/page0page.ic
+++ b/storage/innobase/include/page0page.ic
@@ -173,9 +173,7 @@ page_header_set_field(
{
ut_ad(page);
ut_ad(field <= PAGE_N_RECS);
-#if 0 /* FIXME: MDEV-19344 hits this */
ut_ad(field != PAGE_N_RECS || val);
-#endif
ut_ad(field == PAGE_N_HEAP || val < srv_page_size);
ut_ad(field != PAGE_N_HEAP || (val & 0x7fff) < srv_page_size);
diff --git a/storage/innobase/include/row0purge.h b/storage/innobase/include/row0purge.h
index c4ddff4243c..6dc7e6ec762 100644
--- a/storage/innobase/include/row0purge.h
+++ b/storage/innobase/include/row0purge.h
@@ -41,10 +41,10 @@ not delete marked version of a clustered index record where DB_TRX_ID
is newer than the purge view.
NOTE: This function should only be called by the purge thread, only
-while holding a latch on the leaf page of the secondary index entry
-(or keeping the buffer pool watch on the page). It is possible that
-this function first returns true and then false, if a user transaction
-inserts a record that the secondary index entry would refer to.
+while holding a latch on the leaf page of the secondary index entry.
+It is possible that this function first returns true and then false,
+if a user transaction inserts a record that the secondary index entry
+would refer to.
However, in that case, the user transaction would also re-insert the
secondary index entry after purge has removed it and released the leaf
page latch.
diff --git a/storage/innobase/include/row0row.h b/storage/innobase/include/row0row.h
index 694604af408..61ba212e635 100644
--- a/storage/innobase/include/row0row.h
+++ b/storage/innobase/include/row0row.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2017, MariaDB Corporation.
+Copyright (c) 2016, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -334,13 +334,11 @@ row_parse_int(
enum row_search_result {
ROW_FOUND = 0, /*!< the record was found */
ROW_NOT_FOUND, /*!< record not found */
- ROW_BUFFERED, /*!< one of BTR_INSERT, BTR_DELETE, or
+ ROW_BUFFERED /*!< one of BTR_INSERT or
BTR_DELETE_MARK was specified, the
secondary index leaf page was not in
the buffer pool, and the operation was
enqueued in the insert/delete buffer */
- ROW_NOT_DELETED_REF /*!< BTR_DELETE was specified, and
- row_purge_poss_sec() failed */
};
/***************************************************************//**
diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc
index 6da08872a9a..86c3509af7f 100644
--- a/storage/innobase/row/row0log.cc
+++ b/storage/innobase/row/row0log.cc
@@ -1669,7 +1669,6 @@ row_log_table_apply_delete_low(
pcur, mtr);
#ifdef UNIV_DEBUG
switch (btr_pcur_get_btr_cur(pcur)->flag) {
- case BTR_CUR_DELETE_REF:
case BTR_CUR_DEL_MARK_IBUF:
case BTR_CUR_DELETE_IBUF:
case BTR_CUR_INSERT_TO_IBUF:
@@ -1751,7 +1750,6 @@ row_log_table_apply_delete(
&pcur, &mtr);
#ifdef UNIV_DEBUG
switch (btr_pcur_get_btr_cur(&pcur)->flag) {
- case BTR_CUR_DELETE_REF:
case BTR_CUR_DEL_MARK_IBUF:
case BTR_CUR_DELETE_IBUF:
case BTR_CUR_INSERT_TO_IBUF:
@@ -1895,7 +1893,6 @@ row_log_table_apply_update(
BTR_MODIFY_TREE, &pcur, &mtr);
#ifdef UNIV_DEBUG
switch (btr_pcur_get_btr_cur(&pcur)->flag) {
- case BTR_CUR_DELETE_REF:
case BTR_CUR_DEL_MARK_IBUF:
case BTR_CUR_DELETE_IBUF:
case BTR_CUR_INSERT_TO_IBUF:
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index 560315af499..7abcf1c5021 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -260,10 +260,10 @@ not delete marked version of a clustered index record where DB_TRX_ID
is newer than the purge view.
NOTE: This function should only be called by the purge thread, only
-while holding a latch on the leaf page of the secondary index entry
-(or keeping the buffer pool watch on the page). It is possible that
-this function first returns true and then false, if a user transaction
-inserts a record that the secondary index entry would refer to.
+while holding a latch on the leaf page of the secondary index entry.
+It is possible that this function first returns true and then false,
+if a user transaction inserts a record that the secondary index entry
+would refer to.
However, in that case, the user transaction would also re-insert the
secondary index entry after purge has removed it and released the leaf
page latch.
@@ -431,10 +431,9 @@ row_purge_remove_sec_if_poss_tree(
case ROW_FOUND:
break;
case ROW_BUFFERED:
- case ROW_NOT_DELETED_REF:
- /* These are invalid outcomes, because the mode passed
+ /* This is invalid, because the mode passed
to row_search_index_entry() did not include any of the
- flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
+ flags BTR_INSERT or BTR_DELETE_MARK. */
ut_error;
}
@@ -553,16 +552,8 @@ row_purge_remove_sec_if_poss_leaf(
: BTR_PURGE_LEAF;
}
- /* Set the purge node for the call to row_purge_poss_sec(). */
- pcur.btr_cur.purge_node = node;
if (dict_index_is_spatial(index)) {
rw_lock_sx_lock(dict_index_get_lock(index));
- pcur.btr_cur.thr = NULL;
- } else {
- /* Set the query thread, so that ibuf_insert_low() will be
- able to invoke thd_get_trx(). */
- pcur.btr_cur.thr = static_cast<que_thr_t*>(
- que_node_get_parent(node));
}
search_result = row_search_index_entry(
@@ -651,8 +642,6 @@ row_purge_remove_sec_if_poss_leaf(
/* (The index entry is still needed,
or the deletion succeeded) */
/* fall through */
- case ROW_NOT_DELETED_REF:
- /* The index entry is still needed. */
case ROW_BUFFERED:
/* The deletion was buffered. */
case ROW_NOT_FOUND:
diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc
index 3e65dc1d28b..8bf5918abaa 100644
--- a/storage/innobase/row/row0row.cc
+++ b/storage/innobase/row/row0row.cc
@@ -1071,10 +1071,6 @@ row_search_index_entry(
}
switch (btr_pcur_get_btr_cur(pcur)->flag) {
- case BTR_CUR_DELETE_REF:
- ut_a(mode & BTR_DELETE && !dict_index_is_spatial(index));
- return(ROW_NOT_DELETED_REF);
-
case BTR_CUR_DEL_MARK_IBUF:
case BTR_CUR_DELETE_IBUF:
case BTR_CUR_INSERT_TO_IBUF:
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index bc4e7d3c380..6192971bb99 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -242,10 +242,9 @@ row_undo_ins_remove_sec_low(
break;
case ROW_BUFFERED:
- case ROW_NOT_DELETED_REF:
- /* These are invalid outcomes, because the mode passed
+ /* This is invalid, because the mode passed
to row_search_index_entry() did not include any of the
- flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
+ flags BTR_INSERT or BTR_DELETE_MARK. */
ut_error;
}
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index 3d4065cbab6..f4de6f5a482 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -484,10 +484,9 @@ row_undo_mod_del_mark_or_remove_sec_low(
case ROW_FOUND:
break;
case ROW_BUFFERED:
- case ROW_NOT_DELETED_REF:
- /* These are invalid outcomes, because the mode passed
+ /* This is invalid, because the mode passed
to row_search_index_entry() did not include any of the
- flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
+ flags BTR_INSERT or BTR_DELETE_MARK. */
ut_error;
}
@@ -667,10 +666,9 @@ try_again:
mem_heap_t* offsets_heap;
ulint* offsets;
case ROW_BUFFERED:
- case ROW_NOT_DELETED_REF:
- /* These are invalid outcomes, because the mode passed
+ /* This is invalid, because the mode passed
to row_search_index_entry() did not include any of the
- flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
+ flags BTR_INSERT or BTR_DELETE_MARK. */
ut_error;
case ROW_NOT_FOUND:
/* For spatial index, if first search didn't find an
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index 021e3d69f8a..e27549244d3 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -2403,9 +2403,6 @@ row_upd_sec_index_entry(
rec = btr_cur_get_rec(btr_cur);
switch (search_result) {
- case ROW_NOT_DELETED_REF: /* should only occur for BTR_DELETE */
- ut_error;
- break;
case ROW_BUFFERED:
/* Entry was delete marked already. */
break;