From b4ea18300a08bd6f8e35d2ffdaec41b58ae8fe3b Mon Sep 17 00:00:00 2001
From: Sergei Petrunia
Date: Fri, 11 Jan 2019 23:57:11 +0300
Subject: Apply this patch from Percona Server:

commit cd7201514fee78aaf7d3eb2b28d2573c76f53b84
Author: Laurynas Biveinis
Date:   Tue Nov 14 06:34:19 2017 +0200

    Fix bug 1704195 / 87065 / TDB-83 (Stop ANALYZE TABLE from flushing
    table definition cache)

    Make ANALYZE TABLE stop flushing affected tables from the table
    definition cache, which has the effect of not blocking any subsequent
    new queries involving the table if there's a parallel long-running
    query:

    - new table flag HA_ONLINE_ANALYZE, return it for InnoDB and TokuDB
      tables;
    - in mysql_admin_table, if we are performing ANALYZE TABLE, and the
      table flag is set, do not remove the table from the table definition
      cache, do not invalidate query cache;
    - in partitioning handler, refresh the query optimizer statistics
      after ANALYZE if the underlying handler supports HA_ONLINE_ANALYZE;
    - new testcases main.percona_nonflushing_analyze_debug,
      parts.percona_nonflushing_analyze_debug and a supporting debug sync
      point.

    For TokuDB, this change exposes bug TDB-83 (Index cardinality stats
    updated for handler::info(HA_STATUS_CONST), not often enough for
    tokudb_cardinality_scale_percent). TokuDB may return different
    rec_per_key values depending on the value of the dynamic variable
    tokudb_cardinality_scale_percent. The server has no way of knowing
    that changing this variable invalidates the previous rec_per_key
    values in any opened table shares, and so does not call
    info(HA_STATUS_CONST) again. Fix by updating rec_per_key for both
    HA_STATUS_CONST and HA_STATUS_VARIABLE.

    This also forces a re-record of tokudb.bugs.db756_card_part_hash_1_pick,
    with the new output seeming to be more correct.
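A rough illustration of the user-visible behaviour this patch removes (the
table name and sleep duration below are arbitrary, chosen only for this
note; the debug-sync testcases added by the patch exercise the same
scenario deterministically):

    # connection 1: a long-running query keeps t1 open
    SELECT COUNT(*), SLEEP(600) FROM t1;

    # connection 2: ANALYZE finishes, but used to mark the table share
    # for eviction from the table definition cache
    ANALYZE TABLE t1;

    # connection 3: used to hang in "Waiting for table flush" until
    # connection 1 finished; with HA_ONLINE_ANALYZE it is not blocked
    SELECT * FROM t1;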
---
 .../include/percona_nonflushing_analyze_debug.inc  | 32 +++++++++++
 .../main/percona_nonflushing_analyze_debug.result  | 25 +++++++++
 .../main/percona_nonflushing_analyze_debug.test    | 11 ++++
 .../r/percona_nonflushing_analyze_debug.result     | 62 ++++++++++++++++++++++
 .../parts/t/percona_nonflushing_analyze_debug.test | 29 ++++++++++
 sql/ha_partition.cc                                | 12 ++++-
 sql/handler.cc                                     |  3 ++
 sql/handler.h                                      |  6 +++
 sql/sql_admin.cc                                   |  5 +-
 storage/innobase/handler/ha_innodb.cc              |  1 +
 10 files changed, 184 insertions(+), 2 deletions(-)
 create mode 100644 mysql-test/include/percona_nonflushing_analyze_debug.inc
 create mode 100644 mysql-test/main/percona_nonflushing_analyze_debug.result
 create mode 100644 mysql-test/main/percona_nonflushing_analyze_debug.test
 create mode 100644 mysql-test/suite/parts/r/percona_nonflushing_analyze_debug.result
 create mode 100644 mysql-test/suite/parts/t/percona_nonflushing_analyze_debug.test

diff --git a/mysql-test/include/percona_nonflushing_analyze_debug.inc b/mysql-test/include/percona_nonflushing_analyze_debug.inc
new file mode 100644
index 00000000000..b2f6df51ab8
--- /dev/null
+++ b/mysql-test/include/percona_nonflushing_analyze_debug.inc
@@ -0,0 +1,32 @@
+#
+# Test ANALYZE TABLE that does not flush table definition cache
+# Arguments:
+# $percona_nonflushing_analyze_table - table to test
+#
+
+--source include/count_sessions.inc
+
+--connect con1,localhost,root
+
+SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
+
+send_eval SELECT * FROM $percona_nonflushing_analyze_table;
+
+--connection default
+
+SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
+
+eval ANALYZE TABLE $percona_nonflushing_analyze_table;
+
+# With the bug fixed this should not block
+eval SELECT * FROM $percona_nonflushing_analyze_table;
+
+SET DEBUG_SYNC="now SIGNAL finish_scan";
+
+--connection con1
+reap;
+--disconnect con1
+--connection default
+SET DEBUG_SYNC='reset';
+
+--source include/wait_until_count_sessions.inc
diff --git a/mysql-test/main/percona_nonflushing_analyze_debug.result b/mysql-test/main/percona_nonflushing_analyze_debug.result
new file mode 100644
index 00000000000..f78888f1918
--- /dev/null
+++ b/mysql-test/main/percona_nonflushing_analyze_debug.result
@@ -0,0 +1,25 @@
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1), (2), (3);
+connect con1,localhost,root;
+SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
+SELECT * FROM t1;
+connection default;
+SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
+ANALYZE TABLE t1;
+Table	Op	Msg_type	Msg_text
+test.t1	analyze	status	OK
+SELECT * FROM t1;
+a
+1
+2
+3
+SET DEBUG_SYNC="now SIGNAL finish_scan";
+connection con1;
+a
+1
+2
+3
+disconnect con1;
+connection default;
+SET DEBUG_SYNC='reset';
+DROP TABLE t1;
diff --git a/mysql-test/main/percona_nonflushing_analyze_debug.test b/mysql-test/main/percona_nonflushing_analyze_debug.test
new file mode 100644
index 00000000000..6d5962ae8a0
--- /dev/null
+++ b/mysql-test/main/percona_nonflushing_analyze_debug.test
@@ -0,0 +1,11 @@
+--source include/have_debug_sync.inc
+--source include/have_innodb.inc
+
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1), (2), (3);
+
+--let $percona_nonflushing_analyze_table= t1
+--source include/percona_nonflushing_analyze_debug.inc
+
+DROP TABLE t1;
+
diff --git a/mysql-test/suite/parts/r/percona_nonflushing_analyze_debug.result b/mysql-test/suite/parts/r/percona_nonflushing_analyze_debug.result
new file mode 100644
index 00000000000..f4ee86d7620
--- /dev/null
+++ b/mysql-test/suite/parts/r/percona_nonflushing_analyze_debug.result
@@ -0,0 +1,62 @@
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB
+PARTITION BY RANGE (a) (
+PARTITION p0 VALUES LESS THAN (3),
+PARTITION p1 VALUES LESS THAN (10));
+INSERT INTO t1 VALUES (1), (2), (3), (4);
+connect con1,localhost,root;
+SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
+SELECT * FROM t1;
+connection default;
+SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
+ANALYZE TABLE t1;
+Table	Op	Msg_type	Msg_text
+test.t1	analyze	status	OK
+SELECT * FROM t1;
+a
+1
+2
+3
+4
+SET DEBUG_SYNC="now SIGNAL finish_scan";
+connection con1;
+a
+1
+2
+3
+4
+disconnect con1;
+connection default;
+SET DEBUG_SYNC='reset';
+DROP TABLE t1;
+CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB
+PARTITION BY RANGE (a)
+SUBPARTITION BY HASH (A)
+SUBPARTITIONS 2 (
+PARTITION p0 VALUES LESS THAN (3),
+PARTITION p1 VALUES LESS THAN (10));
+INSERT INTO t2 VALUES (1), (2), (3), (4);
+connect con1,localhost,root;
+SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
+SELECT * FROM t2;
+connection default;
+SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
+ANALYZE TABLE t2;
+Table	Op	Msg_type	Msg_text
+test.t2	analyze	status	OK
+SELECT * FROM t2;
+a
+1
+2
+3
+4
+SET DEBUG_SYNC="now SIGNAL finish_scan";
+connection con1;
+a
+1
+2
+3
+4
+disconnect con1;
+connection default;
+SET DEBUG_SYNC='reset';
+DROP TABLE t2;
diff --git a/mysql-test/suite/parts/t/percona_nonflushing_analyze_debug.test b/mysql-test/suite/parts/t/percona_nonflushing_analyze_debug.test
new file mode 100644
index 00000000000..61c0a278ebb
--- /dev/null
+++ b/mysql-test/suite/parts/t/percona_nonflushing_analyze_debug.test
@@ -0,0 +1,29 @@
+--source include/have_debug_sync.inc
+--source include/have_innodb.inc
+--source include/have_partition.inc
+
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB
+  PARTITION BY RANGE (a) (
+  PARTITION p0 VALUES LESS THAN (3),
+  PARTITION p1 VALUES LESS THAN (10));
+
+INSERT INTO t1 VALUES (1), (2), (3), (4);
+
+--let $percona_nonflushing_analyze_table= t1
+--source include/percona_nonflushing_analyze_debug.inc
+
+DROP TABLE t1;
+
+CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB
+  PARTITION BY RANGE (a)
+  SUBPARTITION BY HASH (A)
+  SUBPARTITIONS 2 (
+  PARTITION p0 VALUES LESS THAN (3),
+  PARTITION p1 VALUES LESS THAN (10));
+
+INSERT INTO t2 VALUES (1), (2), (3), (4);
+
+--let $percona_nonflushing_analyze_table= t2
+--source include/percona_nonflushing_analyze_debug.inc
+
+DROP TABLE t2;
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 1349571a3f8..361dacb51f4 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1162,7 +1162,17 @@ int ha_partition::analyze(THD *thd, HA_CHECK_OPT *check_opt)
 {
   DBUG_ENTER("ha_partition::analyze");
 
-  DBUG_RETURN(handle_opt_partitions(thd, check_opt, ANALYZE_PARTS));
+  int result= handle_opt_partitions(thd, check_opt, ANALYZE_PARTS);
+
+  if ((result == 0) && m_file[0]
+      && (m_file[0]->ha_table_flags() & HA_ONLINE_ANALYZE))
+  {
+    /* If this is ANALYZE TABLE that will not force table definition cache
+       eviction, update statistics for the partition handler. */
+    this->info(HA_STATUS_CONST | HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+  }
+
+  DBUG_RETURN(result);
 }
 
 
diff --git a/sql/handler.cc b/sql/handler.cc
index 897d468f2ba..08735f284d8 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -2880,6 +2880,9 @@ int handler::ha_index_next(uchar * buf)
     table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ);
   }
   table->status=result ? STATUS_NOT_FOUND: 0;
+
+  DEBUG_SYNC(ha_thd(), "handler_ha_index_next_end");
+
   DBUG_RETURN(result);
 }
 
diff --git a/sql/handler.h b/sql/handler.h
index a4e81ea0365..2e469e23e04 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -304,6 +304,12 @@ enum enum_alter_inplace_result {
 /* Engine wants primary keys for everything except sequences */
 #define HA_WANTS_PRIMARY_KEY (1ULL << 55)
 
+/*
+  There is no need to evict the table from the table definition cache having
+  run ANALYZE TABLE on it
+ */
+#define HA_ONLINE_ANALYZE (1ULL << 56)
+
 /* bits in index_flags(index_number) for what you can do with index */
 #define HA_READ_NEXT 1 /* TODO really use this flag */
 #define HA_READ_PREV 2 /* supports ::index_prev */
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index c17567e6a89..221220ba3d4 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -1143,6 +1143,9 @@ send_result_message:
     }
     if (table->table && !table->view)
     {
+      const bool skip_flush=
+        (operator_func == &handler::ha_analyze)
+        && (table->table->file->ha_table_flags() & HA_ONLINE_ANALYZE);
       if (table->table->s->tmp_table)
       {
         /*
@@ -1152,7 +1155,7 @@ send_result_message:
         if (open_for_modify && !open_error)
           table->table->file->info(HA_STATUS_CONST);
       }
-      else if (open_for_modify || fatal_error)
+      else if ((!skip_flush && open_for_modify) || fatal_error)
       {
         tdc_remove_table(thd, TDC_RT_REMOVE_UNUSED,
                          table->db.str, table->table_name.str, FALSE);
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 84ae2580e4f..94b533f6b06 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -2912,6 +2912,7 @@ ha_innobase::ha_innobase(
 		  | HA_CAN_FULLTEXT_HINTS
 		  */
 		  | HA_CAN_EXPORT
+		  | HA_ONLINE_ANALYZE
 		  | HA_CAN_RTREEKEYS
 		  | HA_CAN_TABLES_WITHOUT_ROLLBACK
 		  | HA_CONCURRENT_OPTIMIZE
-- 
cgit v1.2.1