-rw-r--r--  client/mysqltest.cc | 2
-rw-r--r--  mysql-test/r/trigger.result | 13
-rw-r--r--  mysql-test/suite/perfschema/r/partition.result | 10
-rw-r--r--  mysql-test/suite/perfschema/t/partition.test | 16
-rw-r--r--  mysql-test/t/trigger.test | 14
-rw-r--r--  sql/sql_admin.cc | 2
-rw-r--r--  sql/sql_alter.cc | 7
-rw-r--r--  sql/sql_parse.cc | 5
-rw-r--r--  sql/sql_string.cc | 11
-rw-r--r--  sql/sql_table.cc | 5
-rw-r--r--  sql/sql_trigger.cc | 7
-rw-r--r--  sql/table.cc | 3
-rw-r--r--  storage/innobase/btr/btr0sea.cc | 20
-rw-r--r--  storage/innobase/buf/buf0buf.cc | 5
-rw-r--r--  storage/innobase/buf/buf0lru.cc | 70
-rw-r--r--  storage/innobase/dict/dict0dict.cc | 27
-rw-r--r--  storage/innobase/fil/fil0fil.cc | 12
-rw-r--r--  storage/innobase/fsp/fsp0fsp.cc | 5
-rw-r--r--  storage/innobase/include/btr0sea.h | 14
-rw-r--r--  storage/innobase/include/buf0lru.h | 18
-rw-r--r--  storage/innobase/row/row0import.cc | 12
-rw-r--r--  storage/innobase/row/row0mysql.cc | 14
-rw-r--r--  storage/sphinx/ha_sphinx.cc | 11
-rw-r--r--  storage/xtradb/btr/btr0sea.cc | 15
-rw-r--r--  storage/xtradb/buf/buf0buf.cc | 8
-rw-r--r--  storage/xtradb/buf/buf0lru.cc | 54
-rw-r--r--  storage/xtradb/dict/dict0dict.cc | 34
-rw-r--r--  storage/xtradb/fil/fil0fil.cc | 4
-rw-r--r--  storage/xtradb/fsp/fsp0fsp.cc | 6
-rw-r--r--  storage/xtradb/include/btr0sea.h | 13
-rw-r--r--  storage/xtradb/include/buf0lru.h | 14
-rw-r--r--  storage/xtradb/row/row0import.cc | 12
-rw-r--r--  storage/xtradb/row/row0mysql.cc | 17
-rw-r--r--  storage/xtradb/row/row0sel.cc | 35
34 files changed, 293 insertions(+), 222 deletions(-)
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index a05305a85e3..b962813f558 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -948,6 +948,8 @@ pthread_handler_t connection_thread(void *arg)
end_thread:
cn->query_done= 1;
+ mysql_close(cn->mysql);
+ cn->mysql= 0;
mysql_thread_end();
pthread_exit(0);
return 0;
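
The change above fixes a leak on the thread-exit path: the connection thread could finish without ever closing its MYSQL handle. A minimal sketch of the intended cleanup order in a libmysqlclient worker thread, with mysqltest's connection bookkeeping omitted; connection_worker is a hypothetical name, the client API calls are the real ones.

    #include <mysql.h>
    #include <pthread.h>

    /* Hypothetical worker that owns one MYSQL handle for its lifetime. */
    static void* connection_worker(void* arg)
    {
      MYSQL* mysql= static_cast<MYSQL*>(arg);

      mysql_thread_init();        /* per-thread client library state */

      /* ... run the queries sent to this connection ... */

      mysql_close(mysql);         /* release the connection and its buffers */
      mysql= NULL;                /* the patch also clears the stored pointer (cn->mysql= 0) */
      mysql_thread_end();         /* must come after mysql_close() */
      pthread_exit(0);
      return 0;
    }
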
diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result
index 701739fcfe5..53e331416ce 100644
--- a/mysql-test/r/trigger.result
+++ b/mysql-test/r/trigger.result
@@ -2345,7 +2345,18 @@ CREATE TABLE t1 (i INT);
insert into t2 value (2);
DROP VIEW v1;
DROP TABLE t1,t2,t3;
-End of 10.1 tests.
+#
+# MDEV-16093
+# Assertion `global_status_var.global_memory_used == 0' failed or
+# bytes lost after inserting into table with non-null blob and trigger
+#
+CREATE TABLE t1 (b BLOB NOT NULL);
+CREATE TRIGGER tr BEFORE UPDATE ON t1 FOR EACH ROW BEGIN END;
+INSERT INTO t1 VALUES ('foo');
+DROP TABLE t1;
+#
+# End of 10.1 tests.
+#
create table t1 (i int);
create trigger tr1 after insert on t1 for each row set @a=@a+1;
create trigger tr2 after insert on t1 for each row set @a=@a+1;
diff --git a/mysql-test/suite/perfschema/r/partition.result b/mysql-test/suite/perfschema/r/partition.result
new file mode 100644
index 00000000000..9bc624268bb
--- /dev/null
+++ b/mysql-test/suite/perfschema/r/partition.result
@@ -0,0 +1,10 @@
+#
+# MDEV-10679
+# Server crashes in in mysql_create_frm_image upon query from
+# performance schema in ps-protocol mode
+#
+CREATE TABLE t1 (i INT);
+ALTER TABLE t1 ADD PARTITION (PARTITION p VALUES LESS THAN (1));
+ERROR HY000: Partition management on a not partitioned table is not possible
+SELECT * FROM performance_schema.events_stages_summary_by_user_by_event_name;
+DROP TABLE t1;
diff --git a/mysql-test/suite/perfschema/t/partition.test b/mysql-test/suite/perfschema/t/partition.test
new file mode 100644
index 00000000000..0b3b204dee7
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/partition.test
@@ -0,0 +1,16 @@
+--source include/have_perfschema.inc
+--source include/have_partition.inc
+
+--echo #
+--echo # MDEV-10679
+--echo # Server crashes in in mysql_create_frm_image upon query from
+--echo # performance schema in ps-protocol mode
+--echo #
+
+CREATE TABLE t1 (i INT);
+--error ER_PARTITION_MGMT_ON_NONPARTITIONED
+ALTER TABLE t1 ADD PARTITION (PARTITION p VALUES LESS THAN (1));
+--disable_result_log
+SELECT * FROM performance_schema.events_stages_summary_by_user_by_event_name;
+--enable_result_log
+DROP TABLE t1;
diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test
index 46a013d1bc9..b9e908e9944 100644
--- a/mysql-test/t/trigger.test
+++ b/mysql-test/t/trigger.test
@@ -2665,8 +2665,20 @@ insert into t2 value (2);
DROP VIEW v1;
DROP TABLE t1,t2,t3;
+--echo #
+--echo # MDEV-16093
+--echo # Assertion `global_status_var.global_memory_used == 0' failed or
+--echo # bytes lost after inserting into table with non-null blob and trigger
+--echo #
+
+CREATE TABLE t1 (b BLOB NOT NULL);
+CREATE TRIGGER tr BEFORE UPDATE ON t1 FOR EACH ROW BEGIN END;
+INSERT INTO t1 VALUES ('foo');
+DROP TABLE t1;
---echo End of 10.1 tests.
+--echo #
+--echo # End of 10.1 tests.
+--echo #
#
# MDEV-10915 Count number of executed triggers
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index 49e14d6b013..a14a8cd0945 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -1216,7 +1216,9 @@ err:
}
close_thread_tables(thd); // Shouldn't be needed
thd->mdl_context.release_transactional_locks();
+#ifdef WITH_PARTITION_STORAGE_ENGINE
err2:
+#endif
thd->resume_subsequent_commits(suspended_wfc);
DBUG_RETURN(TRUE);
}
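
The new #ifdef around err2 avoids an unused-label warning in builds without partitioning, presumably because the only goto that targets the label sits inside partitioning-specific code. A reduced, hypothetical illustration of the pattern (admin_cleanup_sketch and partition_error are made-up names):

    /* The label must be compiled out together with its only goto,
       otherwise -Wunused-label fires in non-partitioned builds. */
    static int admin_cleanup_sketch(bool partition_error)
    {
      (void) partition_error;
    #ifdef WITH_PARTITION_STORAGE_ENGINE
      if (partition_error)
        goto err2;                        /* the label's only user */
    #endif
      /* ... release tables and transactional locks ... */
    #ifdef WITH_PARTITION_STORAGE_ENGINE
    err2:
    #endif
      /* ... cleanup shared by both paths ... */
      return 1;
    }
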
diff --git a/sql/sql_alter.cc b/sql/sql_alter.cc
index 3592fa3cfbd..3a9ba07d7a1 100644
--- a/sql/sql_alter.cc
+++ b/sql/sql_alter.cc
@@ -263,8 +263,8 @@ bool Sql_cmd_alter_table::execute(THD *thd)
- For temporary MERGE tables we do not track if their child tables are
base or temporary. As result we can't guarantee that privilege check
- which was done in presence of temporary child will stay relevant later
- as this temporary table might be removed.
+ which was done in presence of temporary child will stay relevant
+ later as this temporary table might be removed.
If SELECT_ACL | UPDATE_ACL | DELETE_ACL privileges were not checked for
the underlying *base* tables, it would create a security breach as in
@@ -304,6 +304,9 @@ bool Sql_cmd_alter_table::execute(THD *thd)
create_info.data_file_name= create_info.index_file_name= NULL;
thd->enable_slow_log= opt_log_slow_admin_statements;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ thd->work_part_info= 0;
+#endif
#ifdef WITH_WSREP
if ((!thd->is_current_stmt_binlog_format_row() ||
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 1c36bd7bcdf..a5973b9b08d 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -2962,10 +2962,6 @@ mysql_execute_command(THD *thd)
#endif
DBUG_ENTER("mysql_execute_command");
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- thd->work_part_info= 0;
-#endif
-
DBUG_ASSERT(thd->transaction.stmt.is_empty() || thd->in_sub_stmt);
/*
Each statement or replication event which might produce deadlock
@@ -3815,6 +3811,7 @@ mysql_execute_command(THD *thd)
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
+ thd->work_part_info= 0;
{
partition_info *part_info= thd->lex->part_info;
if (part_info && !(part_info= part_info->get_clone(thd)))
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index 64661f46a49..beff50bd1c5 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -187,7 +187,16 @@ bool String::copy(const char *str,size_t arg_length, CHARSET_INFO *cs)
if (alloc(arg_length))
return TRUE;
DBUG_ASSERT(arg_length < UINT_MAX32);
- if ((str_length=arg_length))
+ if (Ptr == str && arg_length == uint32(str_length))
+ {
+ /*
+ This can happen in some cases. This code is here mainly to avoid
+ warnings from valgrind, but can also be an indication of error.
+ */
+ DBUG_PRINT("warning", ("Copying string on itself: %p %zu",
+ str, arg_length));
+ }
+ else if ((str_length=uint32(arg_length)))
memcpy(Ptr,str,arg_length);
Ptr[arg_length]=0;
str_charset=cs;
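
The new branch keeps String::copy() from calling memcpy() with a source that is the string's own buffer, which valgrind flags as an overlapping copy. A self-contained sketch of the same guard on a simplified buffer type (BufSketch is a stand-in, not the real String class):

    #include <cassert>
    #include <cstdio>
    #include <cstring>

    struct BufSketch
    {
      char*  ptr;                   /* assumed allocated with room for len + 1 */
      size_t len;

      void copy(const char* str, size_t n)
      {
        assert(ptr != nullptr);     /* the real code calls alloc(n) first */
        if (ptr == str && n == len)
        {
          /* Copying the string onto itself: nothing needs to move, but it
             can indicate a caller bug, so only warn about it. */
          std::fprintf(stderr, "warning: self-copy of %zu bytes\n", n);
        }
        else if ((len= n) != 0)
          std::memcpy(ptr, str, n); /* ordinary copy */
        ptr[n]= 0;                  /* keep the buffer NUL-terminated */
      }
    };
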
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 853535f3d51..dd21c8fb107 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -6123,6 +6123,7 @@ remove_key:
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
+ DBUG_ASSERT(thd->work_part_info == 0);
partition_info *tab_part_info= table->part_info;
thd->work_part_info= thd->lex->part_info;
if (tab_part_info)
@@ -8719,6 +8720,10 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
DBUG_ENTER("mysql_alter_table");
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ thd->work_part_info= 0; // Used by partitioning
+#endif
+
/*
Check if we attempt to alter mysql.slow_log or
mysql.general_log table and return an error if
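
Taken together, the sql_parse.cc and sql_alter.cc hunks above, this file, and the table.cc hunk below move the reset of thd->work_part_info from the statement dispatcher to the places that actually consume it, and the new assertion catches any path that still relies on the old blanket reset. A hedged sketch of that pattern with stand-in types (THD_sketch is not the real THD):

    #include <cassert>

    struct partition_info;                       /* opaque here */

    struct THD_sketch
    {
      partition_info* work_part_info;            /* per-statement scratch state */
    };

    /* Reset at the entry point of every statement that uses the field ... */
    static void enter_alter_table(THD_sketch* thd)
    {
      thd->work_part_info= nullptr;
    }

    /* ... so that the consumer can assert nothing stale leaked in. */
    static void pick_up_partition_clause(THD_sketch* thd,
                                         partition_info* lex_part_info)
    {
      assert(thd->work_part_info == nullptr);
      thd->work_part_info= lex_part_info;
    }
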
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 8f76e7a537e..32dc1dd15e0 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -623,6 +623,7 @@ end:
#endif /* WITH_WSREP */
}
+
/**
Build stmt_query to write it in the bin-log, the statement to write in
the trigger file and the trigger definer.
@@ -1182,6 +1183,12 @@ Table_triggers_list::~Table_triggers_list()
}
}
}
+
+ /* Free blobs used in insert */
+ if (record0_field)
+ for (Field **fld_ptr= record0_field; *fld_ptr; fld_ptr++)
+ (*fld_ptr)->free();
+
if (record1_field)
for (Field **fld_ptr= record1_field; *fld_ptr; fld_ptr++)
delete *fld_ptr;
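
record0_field, like record1_field below it, is a null-terminated array of Field objects, and its blob fields own heap buffers that were previously never released (MDEV-16093). A hedged model of the cleanup idiom with a simplified field type; FieldSketch and free_buffers() are made-up names, and object lifetime is managed elsewhere in the real code:

    #include <cstdlib>

    struct FieldSketch
    {
      void* blob_buf;                            /* owned storage, may be null */
      void  free_buffers() { std::free(blob_buf); blob_buf= nullptr; }
    };

    /* Walk a null-terminated array and release only the owned buffers;
       the objects themselves are not deleted here. */
    static void free_record_fields(FieldSketch** fields)
    {
      if (fields == nullptr)
        return;
      for (FieldSketch** f= fields; *f != nullptr; f++)
        (*f)->free_buffers();
    }
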
diff --git a/sql/table.cc b/sql/table.cc
index 040eec0d2e8..12fc10c3259 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -2663,6 +2663,9 @@ int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write,
goto ret;
thd->lex->create_info.db_type= hton;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ thd->work_part_info= 0; // For partitioning
+#endif
if (tabledef_version.str)
thd->lex->create_info.tabledef_version= tabledef_version;
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index 750c2506ff5..278062afa78 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -2,7 +2,7 @@
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2017, MariaDB Corporation.
+Copyright (c) 2017, 2018, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -1177,7 +1177,8 @@ retry:
#endif
ut_ad(btr_search_enabled);
- ut_ad(block->page.id.space() == index->space);
+ ut_ad(index->space == FIL_NULL
+ || block->page.id.space() == index->space);
ut_a(index_id == index->id);
ut_a(!dict_index_is_ibuf(index));
#ifdef UNIV_DEBUG
@@ -1300,15 +1301,10 @@ cleanup:
ut_free(folds);
}
-/** Drop any adaptive hash index entries that may point to an index
-page that may be in the buffer pool, when a page is evicted from the
-buffer pool or freed in a file segment.
-@param[in] page_id page id
-@param[in] page_size page size */
-void
-btr_search_drop_page_hash_when_freed(
- const page_id_t& page_id,
- const page_size_t& page_size)
+/** Drop possible adaptive hash index entries when a page is evicted
+from the buffer pool or freed in a file, or the index is being dropped.
+@param[in] page_id page id */
+void btr_search_drop_page_hash_when_freed(const page_id_t& page_id)
{
buf_block_t* block;
mtr_t mtr;
@@ -1324,7 +1320,7 @@ btr_search_drop_page_hash_when_freed(
are possibly holding, we cannot s-latch the page, but must
(recursively) x-latch it, even though we are only reading. */
- block = buf_page_get_gen(page_id, page_size, RW_X_LATCH, NULL,
+ block = buf_page_get_gen(page_id, univ_page_size, RW_X_LATCH, NULL,
BUF_PEEK_IF_IN_POOL, __FILE__, __LINE__,
&mtr, &err);
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 2f385b6f8e6..845bb17bfa5 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -4203,10 +4203,12 @@ buf_page_get_gen(
#ifdef UNIV_DEBUG
switch (mode) {
case BUF_EVICT_IF_IN_POOL:
+ case BUF_PEEK_IF_IN_POOL:
/* After DISCARD TABLESPACE, the tablespace would not exist,
but in IMPORT TABLESPACE, PageConverter::operator() must
replace any old pages, which were not evicted during DISCARD.
- Skip the assertion on space_page_size. */
+ Similarly, btr_search_drop_page_hash_when_freed() must
+ remove any old pages. Skip the assertion on page_size. */
break;
default:
ut_error;
@@ -4215,7 +4217,6 @@ buf_page_get_gen(
/* fall through */
case BUF_GET:
case BUF_GET_IF_IN_POOL:
- case BUF_PEEK_IF_IN_POOL:
case BUF_GET_IF_IN_POOL_OR_WATCH:
case BUF_GET_POSSIBLY_FREED:
bool found;
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index cdea41ff191..60970331b25 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -224,30 +224,21 @@ buf_LRU_evict_from_unzip_LRU(
/** Attempts to drop page hash index on a batch of pages belonging to a
particular space id.
@param[in] space_id space id
-@param[in] page_size page size
@param[in] arr array of page_no
@param[in] count number of entries in array */
static
void
-buf_LRU_drop_page_hash_batch(
- ulint space_id,
- const page_size_t& page_size,
- const ulint* arr,
- ulint count)
+buf_LRU_drop_page_hash_batch(ulint space_id, const ulint* arr, ulint count)
{
ut_ad(count <= BUF_LRU_DROP_SEARCH_SIZE);
- for (ulint i = 0; i < count; ++i, ++arr) {
+ for (const ulint* const end = arr + count; arr != end; ) {
/* While our only caller
buf_LRU_drop_page_hash_for_tablespace()
is being executed for DROP TABLE or similar,
- the table cannot be evicted from the buffer pool.
- Note: this should not be executed for DROP TABLESPACE,
- because DROP TABLESPACE would be refused if tables existed
- in the tablespace, and a previous DROP TABLE would have
- already removed the AHI entries. */
+ the table cannot be evicted from the buffer pool. */
btr_search_drop_page_hash_when_freed(
- page_id_t(space_id, *arr), page_size);
+ page_id_t(space_id, *arr++));
}
}
@@ -263,15 +254,6 @@ buf_LRU_drop_page_hash_for_tablespace(
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
ulint id) /*!< in: space id */
{
- bool found;
- const page_size_t page_size(fil_space_get_page_size(id, &found));
-
- if (!found) {
- /* Somehow, the tablespace does not exist. Nothing to drop. */
- ut_ad(0);
- return;
- }
-
ulint* page_arr = static_cast<ulint*>(ut_malloc_nokey(
sizeof(ulint) * BUF_LRU_DROP_SEARCH_SIZE));
@@ -338,8 +320,7 @@ next_page:
the latching order. */
buf_pool_mutex_exit(buf_pool);
- buf_LRU_drop_page_hash_batch(
- id, page_size, page_arr, num_entries);
+ buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
num_entries = 0;
@@ -371,9 +352,31 @@ next_page:
buf_pool_mutex_exit(buf_pool);
/* Drop any remaining batch of search hashed pages. */
- buf_LRU_drop_page_hash_batch(id, page_size, page_arr, num_entries);
+ buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
ut_free(page_arr);
}
+
+/** Drop the adaptive hash index for a tablespace.
+@param[in,out] table table */
+void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+{
+ for (dict_index_t* index = dict_table_get_first_index(table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+ if (btr_search_info_get_ref_count(btr_search_get_info(index),
+ index)) {
+ goto drop_ahi;
+ }
+ }
+
+ return;
+drop_ahi:
+ ulint id = table->space;
+ for (ulint i = 0; i < srv_buf_pool_instances; i++) {
+ buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
+ id);
+ }
+}
#endif /* BTR_CUR_HASH_ADAPT */
/******************************************************************//**
@@ -697,26 +700,13 @@ buf_flush_dirty_pages(
@param[in] id tablespace identifier
@param[in] observer flush observer,
or NULL if nothing is to be written */
-void
-buf_LRU_flush_or_remove_pages(
- ulint id,
- FlushObserver* observer
-#ifdef BTR_CUR_HASH_ADAPT
- , bool drop_ahi /*!< whether to drop the adaptive hash index */
-#endif /* BTR_CUR_HASH_ADAPT */
- )
+void buf_LRU_flush_or_remove_pages(ulint id, FlushObserver* observer)
{
/* Pages in the system tablespace must never be discarded. */
ut_ad(id || observer);
for (ulint i = 0; i < srv_buf_pool_instances; i++) {
- buf_pool_t* buf_pool = buf_pool_from_array(i);
-#ifdef BTR_CUR_HASH_ADAPT
- if (drop_ahi) {
- buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
- }
-#endif /* BTR_CUR_HASH_ADAPT */
- buf_flush_dirty_pages(buf_pool, id, observer);
+ buf_flush_dirty_pages(buf_pool_from_array(i), id, observer);
}
if (observer && !observer->is_interrupted()) {
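
With the new table-level wrapper, callers drop the adaptive hash index per table before the tablespace goes away, instead of threading a drop_ahi flag down to the flush code. A hedged sketch of the caller-side sequence, simplified from the row0mysql.cc and fil0fil.cc changes in this patch; dictionary locking and error handling are elided, and the includes are assumed InnoDB-internal headers:

    #include "dict0dict.h"   /* dict_table_t */
    #include "buf0lru.h"     /* buf_LRU_drop_page_hash_for_tablespace() */
    #include "fil0fil.h"     /* fil_delete_tablespace() */
    #include "trx0sys.h"     /* TRX_SYS_SPACE */

    static void drop_table_sketch(dict_table_t* table)
    {
      if (table->space != TRX_SYS_SPACE) {
        /* Cheap when no index has adaptive hash index references:
           the wrapper checks btr_search_info_get_ref_count() for each
           index before it scans any buffer pool instance. */
        buf_LRU_drop_page_hash_for_tablespace(table);
      }

      /* ... delete the dictionary records ... */

      /* fil_delete_tablespace() now only flushes or removes pages via
         buf_LRU_flush_or_remove_pages(); it no longer takes drop_ahi. */
      fil_delete_tablespace(table->space);
    }
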
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 4751add93d5..9a7d9eef092 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -1649,11 +1649,7 @@ dict_table_rename_in_cache(
return(DB_OUT_OF_MEMORY);
}
- fil_delete_tablespace(table->space
-#ifdef BTR_CUR_HASH_ADAPT
- , true
-#endif /* BTR_CUR_HASH_ADAPT */
- );
+ fil_delete_tablespace(table->space);
/* Delete any temp file hanging around. */
if (os_file_status(filepath, &exists, &ftype)
@@ -2657,28 +2653,13 @@ dict_index_remove_from_cache_low(
zero. See also: dict_table_can_be_evicted() */
do {
- ulint ref_count = btr_search_info_get_ref_count(info, index);
-
- if (ref_count == 0) {
+ if (!btr_search_info_get_ref_count(info, index)) {
break;
}
- /* Sleep for 10ms before trying again. */
- os_thread_sleep(10000);
- ++retries;
-
- if (retries % 500 == 0) {
- /* No luck after 5 seconds of wait. */
- ib::error() << "Waited for " << retries / 100
- << " secs for hash index"
- " ref_count (" << ref_count << ") to drop to 0."
- " index: " << index->name
- << " table: " << table->name;
- }
+ buf_LRU_drop_page_hash_for_tablespace(table);
- /* To avoid a hang here we commit suicide if the
- ref_count doesn't drop to zero in 600 seconds. */
- ut_a(retries < 60000);
+ ut_a(++retries < 10000);
} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
#endif /* BTR_CUR_HASH_ADAPT */
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index 240a453ff5d..c889e8f688e 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -3030,11 +3030,7 @@ fil_delete_tablespace(
To deal with potential read requests, we will check the
::stop_new_ops flag in fil_io(). */
- buf_LRU_flush_or_remove_pages(id, NULL
-#ifdef BTR_CUR_HASH_ADAPT
- , drop_ahi
-#endif /* BTR_CUR_HASH_ADAPT */
- );
+ buf_LRU_flush_or_remove_pages(id, NULL);
/* If it is a delete then also delete any generated files, otherwise
when we drop the database the remove directory will fail. */
@@ -3314,11 +3310,7 @@ fil_discard_tablespace(
{
dberr_t err;
- switch (err = fil_delete_tablespace(id
-#ifdef BTR_CUR_HASH_ADAPT
- , true
-#endif /* BTR_CUR_HASH_ADAPT */
- )) {
+ switch (err = fil_delete_tablespace(id)) {
case DB_SUCCESS:
break;
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index 50d5fc84384..d4d5abeb32f 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -3057,7 +3057,7 @@ fseg_free_page_low(
if (ahi) {
btr_search_drop_page_hash_when_freed(
- page_id_t(space->id, offset), page_size);
+ page_id_t(space->id, offset));
}
#endif /* BTR_CUR_HASH_ADAPT */
@@ -3261,8 +3261,7 @@ fseg_free_extent(
btr_search_drop_page_hash_when_freed(
page_id_t(space->id,
- first_page_in_extent + i),
- page_size);
+ first_page_in_extent + i));
}
}
}
diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h
index fad0dac93c4..e6983cacffb 100644
--- a/storage/innobase/include/btr0sea.h
+++ b/storage/innobase/include/btr0sea.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -129,15 +130,10 @@ btr_search_move_or_delete_hash_entries(
void
btr_search_drop_page_hash_index(buf_block_t* block);
-/** Drop any adaptive hash index entries that may point to an index
-page that may be in the buffer pool, when a page is evicted from the
-buffer pool or freed in a file segment.
-@param[in] page_id page id
-@param[in] page_size page size */
-void
-btr_search_drop_page_hash_when_freed(
- const page_id_t& page_id,
- const page_size_t& page_size);
+/** Drop possible adaptive hash index entries when a page is evicted
+from the buffer pool or freed in a file, or the index is being dropped.
+@param[in] page_id page id */
+void btr_search_drop_page_hash_when_freed(const page_id_t& page_id);
/** Updates the page hash index when a single record is inserted on a page.
@param[in] cursor cursor which was positioned to the place to insert
diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h
index f6a7695a2b5..7ef62e6436d 100644
--- a/storage/innobase/include/buf0lru.h
+++ b/storage/innobase/include/buf0lru.h
@@ -50,18 +50,20 @@ These are low-level functions
/** Minimum LRU list length for which the LRU_old pointer is defined */
#define BUF_LRU_OLD_MIN_LEN 512 /* 8 megabytes of 16k pages */
+#ifdef BTR_CUR_HASH_ADAPT
+struct dict_table_t;
+/** Drop the adaptive hash index for a tablespace.
+@param[in,out] table table */
+void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
+#else
+# define buf_LRU_drop_page_hash_for_tablespace(table)
+#endif /* BTR_CUR_HASH_ADAPT */
+
/** Empty the flush list for all pages belonging to a tablespace.
@param[in] id tablespace identifier
@param[in,out] observer flush observer,
or NULL if nothing is to be written */
-void
-buf_LRU_flush_or_remove_pages(
- ulint id,
- FlushObserver* observer
-#ifdef BTR_CUR_HASH_ADAPT
- , bool drop_ahi = false /*!< whether to drop the adaptive hash index */
-#endif /* BTR_CUR_HASH_ADAPT */
- );
+void buf_LRU_flush_or_remove_pages(ulint id, FlushObserver* observer);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/********************************************************************//**
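
The innobase header pairs the declaration with a no-op macro for builds without BTR_CUR_HASH_ADAPT, so call sites need no #ifdef of their own. A generic, hypothetical sketch of that idiom (FEATURE_X, widget_t and feature_x_cleanup are made-up names; this variant expands to ((void) 0) instead of to nothing):

    #ifdef FEATURE_X
    struct widget_t;
    /* Real declaration: only exists when the feature is compiled in. */
    void feature_x_cleanup(widget_t* w);
    #else
    /* Compiled-out build: every call site turns into a harmless no-op. */
    # define feature_x_cleanup(w) ((void) 0)
    #endif
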
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index 0d6f2461c5e..07bd07db864 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -28,6 +28,7 @@ Created 2012-02-08 by Sunny Bains.
#include "row0import.h"
#include "btr0pcur.h"
+#include "btr0sea.h"
#include "que0que.h"
#include "dict0boot.h"
#include "ibuf0ibuf.h"
@@ -3883,6 +3884,17 @@ row_import_for_mysql(
return(row_import_cleanup(prebuilt, trx, err));
}
+ /* On DISCARD TABLESPACE, we did not drop any adaptive hash
+ index entries. If we replaced the discarded tablespace with a
+ smaller one here, there could still be some adaptive hash
+ index entries that point to cached garbage pages in the buffer
+ pool, because PageConverter::operator() only evicted those
+ pages that were replaced by the imported pages. We must
+ discard all remaining adaptive hash index entries, because the
+ adaptive hash index must be a subset of the table contents;
+ false positives are not tolerated. */
+ buf_LRU_drop_page_hash_for_tablespace(table);
+
row_mysql_lock_data_dictionary(trx);
/* If the table is stored in a remote tablespace, we need to
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 80097130cb7..a06fa1ddf47 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -3626,6 +3626,20 @@ row_drop_table_for_mysql(
/* As we don't insert entries to SYSTEM TABLES for temp-tables
we need to avoid running removal of these entries. */
if (!dict_table_is_temporary(table)) {
+ if (table->space != TRX_SYS_SPACE) {
+ /* On DISCARD TABLESPACE, we would not drop the
+ adaptive hash index entries. If the tablespace is
+ missing here, delete-marking the record in SYS_INDEXES
+ would not free any pages in the buffer pool. Thus,
+ dict_index_remove_from_cache() would hang due to
+ adaptive hash index entries existing in the buffer
+ pool. To prevent this hang, and also to guarantee
+ that btr_search_drop_page_hash_when_freed() will avoid
+ calling btr_search_drop_page_hash_index() while we
+ hold the InnoDB dictionary lock, we will drop any
+ adaptive hash index entries upfront. */
+ buf_LRU_drop_page_hash_for_tablespace(table);
+ }
/* We use the private SQL parser of Innobase to generate the
query graphs needed in deleting the dictionary data from system
diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc
index adb9d63635d..2cf93533a4f 100644
--- a/storage/sphinx/ha_sphinx.cc
+++ b/storage/sphinx/ha_sphinx.cc
@@ -17,7 +17,7 @@
#pragma implementation // gcc: Class implementation
#endif
-#if _MSC_VER>=1400
+#if defined(_MSC_VER) && _MSC_VER>=1400
#define _CRT_SECURE_NO_DEPRECATE 1
#define _CRT_NONSTDC_NO_DEPRECATE 1
#endif
@@ -64,7 +64,7 @@
#define MSG_WAITALL 0
#endif
-#if _MSC_VER>=1400
+#if defined(_MSC_VER) && _MSC_VER>=1400
#pragma warning(push,4)
#endif
@@ -3501,8 +3501,11 @@ int ha_sphinx::create ( const char * name, TABLE * table_arg, HA_CREATE_INFO * )
// report and bail
if ( sError[0] )
{
- my_error ( ER_CANT_CREATE_TABLE, MYF(0),
- table_arg->s->db.str, table_arg->s->table_name, sError );
+ my_printf_error(ER_CANT_CREATE_TABLE,
+ "Can\'t create table %s.%s (Error: %s)",
+ MYF(0),
+ table_arg->s->db.str,
+ table_arg->s->table_name.str, sError);
SPH_RET(-1);
}
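
The old call fed my_error() arguments that do not match the errmsg catalogue format for ER_CANT_CREATE_TABLE, and passed the LEX_STRING table_name where a char* is expected; my_printf_error() takes an explicit format, so the message can carry the database, table and engine error text. A hedged sketch of the corrected call, assuming the usual MariaDB mysys headers:

    #include "my_global.h"     /* assumed MariaDB-internal header */
    #include "my_sys.h"        /* my_printf_error(), MYF() */
    #include "mysqld_error.h"  /* ER_CANT_CREATE_TABLE */

    static void report_create_failure(const char* db, const char* table,
                                      const char* engine_error)
    {
      /* The error code is kept for clients, but the text is formatted
         here rather than from the catalogue entry. */
      my_printf_error(ER_CANT_CREATE_TABLE,
                      "Can't create table %s.%s (Error: %s)", MYF(0),
                      db, table, engine_error);
    }
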
diff --git a/storage/xtradb/btr/btr0sea.cc b/storage/xtradb/btr/btr0sea.cc
index 6891583554c..82366a2098f 100644
--- a/storage/xtradb/btr/btr0sea.cc
+++ b/storage/xtradb/btr/btr0sea.cc
@@ -2,6 +2,7 @@
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
+Copyright (c) 2018, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -1287,17 +1288,11 @@ cleanup:
mem_free(folds);
}
-/********************************************************************//**
-Drops a possible page hash index when a page is evicted from the buffer pool
-or freed in a file segment. */
+/** Drop possible adaptive hash index entries when a page is evicted
+from the buffer pool or freed in a file, or the index is being dropped. */
UNIV_INTERN
void
-btr_search_drop_page_hash_when_freed(
-/*=================================*/
- ulint space, /*!< in: space id */
- ulint zip_size, /*!< in: compressed page size in bytes
- or 0 for uncompressed pages */
- ulint page_no) /*!< in: page number */
+btr_search_drop_page_hash_when_freed(ulint space, ulint page_no)
{
buf_block_t* block;
mtr_t mtr;
@@ -1310,7 +1305,7 @@ btr_search_drop_page_hash_when_freed(
are possibly holding, we cannot s-latch the page, but must
(recursively) x-latch it, even though we are only reading. */
- block = buf_page_get_gen(space, zip_size, page_no, RW_X_LATCH, NULL,
+ block = buf_page_get_gen(space, 0, page_no, RW_X_LATCH, NULL,
BUF_PEEK_IF_IN_POOL, __FILE__, __LINE__,
&mtr);
diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc
index 2e25faddebe..b49ee86f78f 100644
--- a/storage/xtradb/buf/buf0buf.cc
+++ b/storage/xtradb/buf/buf0buf.cc
@@ -2972,17 +2972,18 @@ buf_page_get_gen(
#ifdef UNIV_DEBUG
switch (mode) {
case BUF_EVICT_IF_IN_POOL:
+ case BUF_PEEK_IF_IN_POOL:
/* After DISCARD TABLESPACE, the tablespace would not exist,
but in IMPORT TABLESPACE, PageConverter::operator() must
replace any old pages, which were not evicted during DISCARD.
- Skip the assertion on zip_size. */
+ Similarly, btr_search_drop_page_hash_when_freed() must
+ remove any old pages. Skip the assertion on zip_size. */
break;
case BUF_GET_NO_LATCH:
ut_ad(rw_latch == RW_NO_LATCH);
/* fall through */
case BUF_GET:
case BUF_GET_IF_IN_POOL:
- case BUF_PEEK_IF_IN_POOL:
case BUF_GET_IF_IN_POOL_OR_WATCH:
case BUF_GET_POSSIBLY_FREED:
ut_ad(zip_size == fil_space_get_zip_size(space));
@@ -3159,7 +3160,8 @@ got_block:
fix_mutex = buf_page_get_mutex(&fix_block->page);
- ut_ad(page_zip_get_size(&block->page.zip) == zip_size);
+ ut_ad(page_zip_get_size(&block->page.zip) == zip_size
+ || mode == BUF_PEEK_IF_IN_POOL);
switch (mode) {
case BUF_GET_IF_IN_POOL:
diff --git a/storage/xtradb/buf/buf0lru.cc b/storage/xtradb/buf/buf0lru.cc
index 7bf423ed740..2c4a4049de6 100644
--- a/storage/xtradb/buf/buf0lru.cc
+++ b/storage/xtradb/buf/buf0lru.cc
@@ -238,8 +238,6 @@ void
buf_LRU_drop_page_hash_batch(
/*=========================*/
ulint space_id, /*!< in: space id */
- ulint zip_size, /*!< in: compressed page size in bytes
- or 0 for uncompressed pages */
const ulint* arr, /*!< in: array of page_no */
ulint count) /*!< in: number of entries in array */
{
@@ -249,8 +247,7 @@ buf_LRU_drop_page_hash_batch(
ut_ad(count <= BUF_LRU_DROP_SEARCH_SIZE);
for (i = 0; i < count; ++i) {
- btr_search_drop_page_hash_when_freed(space_id, zip_size,
- arr[i]);
+ btr_search_drop_page_hash_when_freed(space_id, arr[i]);
}
}
@@ -269,15 +266,6 @@ buf_LRU_drop_page_hash_for_tablespace(
buf_page_t* bpage;
ulint* page_arr;
ulint num_entries;
- ulint zip_size;
-
- zip_size = fil_space_get_zip_size(id);
-
- if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {
- /* Somehow, the tablespace does not exist. Nothing to drop. */
- ut_ad(0);
- return;
- }
page_arr = static_cast<ulint*>(ut_malloc(
sizeof(ulint) * BUF_LRU_DROP_SEARCH_SIZE));
@@ -331,8 +319,7 @@ next_page:
the latching order. */
mutex_exit(&buf_pool->LRU_list_mutex);
- buf_LRU_drop_page_hash_batch(
- id, zip_size, page_arr, num_entries);
+ buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
num_entries = 0;
@@ -363,10 +350,32 @@ next_page:
mutex_exit(&buf_pool->LRU_list_mutex);
/* Drop any remaining batch of search hashed pages. */
- buf_LRU_drop_page_hash_batch(id, zip_size, page_arr, num_entries);
+ buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
ut_free(page_arr);
}
+/** Drop the adaptive hash index for a tablespace.
+@param[in,out] table table */
+UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+{
+ for (dict_index_t* index = dict_table_get_first_index(table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+ if (btr_search_info_get_ref_count(btr_search_get_info(index),
+ index)) {
+ goto drop_ahi;
+ }
+ }
+
+ return;
+drop_ahi:
+ ulint id = table->space;
+ for (ulint i = 0; i < srv_buf_pool_instances; i++) {
+ buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
+ id);
+ }
+}
+
/******************************************************************//**
While flushing (or removing dirty) pages from a tablespace we don't
want to hog the CPU and resources. Release the buffer pool and block
@@ -733,18 +742,11 @@ buf_flush_dirty_pages(buf_pool_t* buf_pool, ulint id, const trx_t* trx)
/** Empty the flush list for all pages belonging to a tablespace.
@param[in] id tablespace identifier
@param[in] trx transaction, for checking for user interrupt;
- or NULL if nothing is to be written
-@param[in] drop_ahi whether to drop the adaptive hash index */
-UNIV_INTERN
-void
-buf_LRU_flush_or_remove_pages(ulint id, const trx_t* trx, bool drop_ahi)
+ or NULL if nothing is to be written */
+UNIV_INTERN void buf_LRU_flush_or_remove_pages(ulint id, const trx_t* trx)
{
for (ulint i = 0; i < srv_buf_pool_instances; i++) {
- buf_pool_t* buf_pool = buf_pool_from_array(i);
- if (drop_ahi) {
- buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
- }
- buf_flush_dirty_pages(buf_pool, id, trx);
+ buf_flush_dirty_pages(buf_pool_from_array(i), id, trx);
}
if (trx && !trx_is_interrupted(trx)) {
diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc
index e361b73ab77..7ade6d79adf 100644
--- a/storage/xtradb/dict/dict0dict.cc
+++ b/storage/xtradb/dict/dict0dict.cc
@@ -2,7 +2,7 @@
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2017, MariaDB Corporation.
+Copyright (c) 2013, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1680,7 +1680,7 @@ dict_table_rename_in_cache(
filepath = fil_make_ibd_name(table->name, false);
}
- fil_delete_tablespace(table->space, true);
+ fil_delete_tablespace(table->space);
/* Delete any temp file hanging around. */
if (os_file_status(filepath, &exists, &ftype)
@@ -2729,36 +2729,12 @@ dict_index_remove_from_cache_low(
zero. See also: dict_table_can_be_evicted() */
do {
- ulint ref_count = btr_search_info_get_ref_count(info,
- index);
-
- if (ref_count == 0) {
+ if (!btr_search_info_get_ref_count(info, index)) {
break;
}
- /* Sleep for 10ms before trying again. */
- os_thread_sleep(10000);
- ++retries;
-
- if (retries % 500 == 0) {
- /* No luck after 5 seconds of wait. */
- fprintf(stderr, "InnoDB: Error: Waited for"
- " %lu secs for hash index"
- " ref_count (%lu) to drop"
- " to 0.\n"
- "index: \"%s\""
- " table: \"%s\"\n",
- retries/100,
- ref_count,
- index->name,
- table->name);
- }
-
- /* To avoid a hang here we commit suicide if the
- ref_count doesn't drop to zero in 600 seconds. */
- if (retries >= 60000) {
- ut_error;
- }
+ buf_LRU_drop_page_hash_for_tablespace(table);
+ ut_a(++retries < 10000);
} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
rw_lock_free(&index->lock);
diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc
index ef8ace7f864..4f1235769ba 100644
--- a/storage/xtradb/fil/fil0fil.cc
+++ b/storage/xtradb/fil/fil0fil.cc
@@ -2936,7 +2936,7 @@ fil_delete_tablespace(ulint id, bool drop_ahi)
To deal with potential read requests by checking the
::stop_new_ops flag in fil_io() */
- buf_LRU_flush_or_remove_pages(id, NULL, drop_ahi);
+ buf_LRU_flush_or_remove_pages(id, NULL);
#endif /* !UNIV_HOTBACKUP */
@@ -3047,7 +3047,7 @@ fil_discard_tablespace(
{
dberr_t err;
- switch (err = fil_delete_tablespace(id, true)) {
+ switch (err = fil_delete_tablespace(id)) {
case DB_SUCCESS:
break;
diff --git a/storage/xtradb/fsp/fsp0fsp.cc b/storage/xtradb/fsp/fsp0fsp.cc
index 02683e298cb..458a4c2cbca 100644
--- a/storage/xtradb/fsp/fsp0fsp.cc
+++ b/storage/xtradb/fsp/fsp0fsp.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation.
+Copyright (c) 2017, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -3035,7 +3035,7 @@ fseg_free_page_low(
/* Drop search system page hash index if the page is found in
the pool and is hashed */
- btr_search_drop_page_hash_when_freed(space, zip_size, page);
+ btr_search_drop_page_hash_when_freed(space, page);
descr = xdes_get_descriptor(space, zip_size, page, mtr);
@@ -3261,7 +3261,7 @@ fseg_free_extent(
found in the pool and is hashed */
btr_search_drop_page_hash_when_freed(
- space, zip_size, first_page_in_extent + i);
+ space, first_page_in_extent + i);
}
}
diff --git a/storage/xtradb/include/btr0sea.h b/storage/xtradb/include/btr0sea.h
index 66c27607013..b7f1c45692a 100644
--- a/storage/xtradb/include/btr0sea.h
+++ b/storage/xtradb/include/btr0sea.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -142,17 +143,11 @@ btr_search_drop_page_hash_index(
s- or x-latched, or an index page
for which we know that
block->buf_fix_count == 0 */
-/********************************************************************//**
-Drops a possible page hash index when a page is evicted from the buffer pool
-or freed in a file segment. */
+/** Drop possible adaptive hash index entries when a page is evicted
+from the buffer pool or freed in a file, or the index is being dropped. */
UNIV_INTERN
void
-btr_search_drop_page_hash_when_freed(
-/*=================================*/
- ulint space, /*!< in: space id */
- ulint zip_size, /*!< in: compressed page size in bytes
- or 0 for uncompressed pages */
- ulint page_no); /*!< in: page number */
+btr_search_drop_page_hash_when_freed(ulint space, ulint page_no);
/********************************************************************//**
Updates the page hash index when a single record is inserted on a page. */
UNIV_INTERN
diff --git a/storage/xtradb/include/buf0lru.h b/storage/xtradb/include/buf0lru.h
index 1bc11937fa1..f0ba1bb227d 100644
--- a/storage/xtradb/include/buf0lru.h
+++ b/storage/xtradb/include/buf0lru.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation.
+Copyright (c) 2017, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -36,6 +36,7 @@ Created 11/5/1995 Heikki Tuuri
// Forward declaration
struct trx_t;
+struct dict_table_t;
/******************************************************************//**
Returns TRUE if less than 25 % of the buffer pool is available. This can be
@@ -54,14 +55,15 @@ These are low-level functions
/** Minimum LRU list length for which the LRU_old pointer is defined */
#define BUF_LRU_OLD_MIN_LEN 512 /* 8 megabytes of 16k pages */
+/** Drop the adaptive hash index for a tablespace.
+@param[in,out] table table */
+UNIV_INTERN void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
+
/** Empty the flush list for all pages belonging to a tablespace.
@param[in] id tablespace identifier
@param[in] trx transaction, for checking for user interrupt;
- or NULL if nothing is to be written
-@param[in] drop_ahi whether to drop the adaptive hash index */
-UNIV_INTERN
-void
-buf_LRU_flush_or_remove_pages(ulint id, const trx_t* trx, bool drop_ahi=false);
+ or NULL if nothing is to be written */
+UNIV_INTERN void buf_LRU_flush_or_remove_pages(ulint id, const trx_t* trx);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/********************************************************************//**
diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc
index 9010ffaaf8b..2315384649d 100644
--- a/storage/xtradb/row/row0import.cc
+++ b/storage/xtradb/row/row0import.cc
@@ -31,6 +31,7 @@ Created 2012-02-08 by Sunny Bains.
#endif
#include "btr0pcur.h"
+#include "btr0sea.h"
#include "que0que.h"
#include "dict0boot.h"
#include "ibuf0ibuf.h"
@@ -3982,6 +3983,17 @@ row_import_for_mysql(
return(row_import_cleanup(prebuilt, trx, err));
}
+ /* On DISCARD TABLESPACE, we did not drop any adaptive hash
+ index entries. If we replaced the discarded tablespace with a
+ smaller one here, there could still be some adaptive hash
+ index entries that point to cached garbage pages in the buffer
+ pool, because PageConverter::operator() only evicted those
+ pages that were replaced by the imported pages. We must
+ discard all remaining adaptive hash index entries, because the
+ adaptive hash index must be a subset of the table contents;
+ false positives are not tolerated. */
+ buf_LRU_drop_page_hash_for_tablespace(table);
+
row_mysql_lock_data_dictionary(trx);
/* If the table is stored in a remote tablespace, we need to
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index 1acdfe53e0c..2b6f38ba2af 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -3540,6 +3540,8 @@ row_truncate_table_for_mysql(
fil_space_release(space);
}
+ buf_LRU_drop_page_hash_for_tablespace(table);
+
if (flags != ULINT_UNDEFINED
&& fil_discard_tablespace(space_id) == DB_SUCCESS) {
@@ -4239,6 +4241,21 @@ row_drop_table_for_mysql(
rw_lock_x_unlock(dict_index_get_lock(index));
}
+ if (table->space != TRX_SYS_SPACE) {
+ /* On DISCARD TABLESPACE, we would not drop the
+ adaptive hash index entries. If the tablespace is
+ missing here, delete-marking the record in SYS_INDEXES
+ would not free any pages in the buffer pool. Thus,
+ dict_index_remove_from_cache() would hang due to
+ adaptive hash index entries existing in the buffer
+ pool. To prevent this hang, and also to guarantee
+ that btr_search_drop_page_hash_when_freed() will avoid
+ calling btr_search_drop_page_hash_index() while we
+ hold the InnoDB dictionary lock, we will drop any
+ adaptive hash index entries upfront. */
+ buf_LRU_drop_page_hash_for_tablespace(table);
+ }
+
/* We use the private SQL parser of Innobase to generate the
query graphs needed in deleting the dictionary data from system
tables in Innobase. Deleting a row from SYS_INDEXES table also
diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc
index 97007c1107c..b6b5d107885 100644
--- a/storage/xtradb/row/row0sel.cc
+++ b/storage/xtradb/row/row0sel.cc
@@ -4665,9 +4665,27 @@ wrong_offs:
ulint lock_type;
+ if (srv_locks_unsafe_for_binlog
+ || trx->isolation_level <= TRX_ISO_READ_COMMITTED) {
+ /* At READ COMMITTED or READ UNCOMMITTED
+ isolation levels, do not lock committed
+ delete-marked records. */
+ if (!rec_get_deleted_flag(rec, comp)) {
+ goto no_gap_lock;
+ }
+ if (trx_id_t trx_id = index == clust_index
+ ? row_get_rec_trx_id(rec, index, offsets)
+ : row_vers_impl_x_locked(rec, index, offsets)) {
+ if (trx_rw_is_active(trx_id, NULL)) {
+ /* The record belongs to an active
+ transaction. We must acquire a lock. */
+ goto no_gap_lock;
+ }
+ }
+ goto locks_ok_del_marked;
+ }
+
if (!set_also_gap_locks
- || srv_locks_unsafe_for_binlog
- || trx->isolation_level <= TRX_ISO_READ_COMMITTED
|| (unique_search && !rec_get_deleted_flag(rec, comp))) {
goto no_gap_lock;
@@ -4862,20 +4880,9 @@ locks_ok:
page_rec_is_comp() cannot be used! */
if (rec_get_deleted_flag(rec, comp)) {
-
+locks_ok_del_marked:
/* The record is delete-marked: we can skip it */
- if ((srv_locks_unsafe_for_binlog
- || trx->isolation_level <= TRX_ISO_READ_COMMITTED)
- && prebuilt->select_lock_type != LOCK_NONE
- && !did_semi_consistent_read) {
-
- /* No need to keep a lock on a delete-marked record
- if we do not want to use next-key locking. */
-
- row_unlock_for_mysql(prebuilt, TRUE);
- }
-
/* This is an optimization to skip setting the next key lock
on the record that follows this delete-marked record. This
optimization works because of the unique search criteria
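
The row0sel.cc change alters how a delete-marked record is handled at READ COMMITTED and READ UNCOMMITTED (or with srv_locks_unsafe_for_binlog): instead of locking the record and releasing the lock again after locks_ok, the cursor checks whether the delete-mark was made by a still-active transaction (the record trx id for the clustered index, row_vers_impl_x_locked() for a secondary index) and skips committed delete-marked rows without taking a lock at all. A hedged sketch of just that decision, with plain booleans standing in for the checks in the patch and must_lock_record() as a made-up name; the real code jumps to no_gap_lock or locks_ok_del_marked rather than returning a value:

    static bool must_lock_record(bool low_isolation,     /* READ COMMITTED or below */
                                 bool delete_marked,     /* rec_get_deleted_flag() */
                                 bool marker_trx_active) /* trx_rw_is_active() */
    {
      if (!low_isolation)
        return true;            /* REPEATABLE READ and above keep the old path */
      if (!delete_marked)
        return true;            /* ordinary record: lock as before */
      return marker_trx_active; /* committed delete-mark: skip, take no lock */
    }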